| code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
---|---|---|---|---|
def exchange_sort(numbers: list[int]) -> list[int]:
    # Repeatedly exchange out-of-order pairs until the list is sorted.
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
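# A minimal sanity check of the sorter above (values chosen for illustration):
# >>> exchange_sort([5, 2, 4, 1])
# [1, 2, 4, 5]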
| 689 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
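# Example (illustrative input): get_pairs(("h", "e", "l", "l", "o"))
# returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}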
class CTRLTokenizer(PreTrainedTokenizer):
    """
    CTRL BPE tokenizer. Word-final sub-words are marked with "</w>" during
    merging and intermediate sub-words are joined with "@@ ".
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (earliest learned) bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string: whitespace-split, then apply BPE to each token."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
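# Usage sketch for the tokenizer above (the local file paths are illustrative,
# not taken from this file):
# tokenizer = CTRLTokenizer("vocab.json", "merges.txt")
# tokens = tokenizer.tokenize("Links Hello world")
# text = tokenizer.convert_tokens_to_string(tokens)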
| 689 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
__a = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class MBartTokenizer(PreTrainedTokenizer):
    """
    MBART tokenizer based on SentencePiece, with language-code special tokens.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : Optional[int] , snake_case__ : List[str] , snake_case__ : Optional[int]="<s>" , snake_case__ : int="</s>" , snake_case__ : List[str]="</s>" , snake_case__ : List[str]="<s>" , snake_case__ : Optional[int]="<unk>" , snake_case__ : Tuple="<pad>" , snake_case__ : List[Any]="<mask>" , snake_case__ : Union[str, Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None , snake_case__ : Optional[Dict[str, Any]] = None , snake_case__ : Optional[int]=None , **snake_case__ : str , ):
"""simple docstring"""
A =AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
A ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , tokenizer_file=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
A =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
A =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
A ={"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A =1
A =len(self.sp_model )
A ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(snake_case__ )
}
A ={v: k for k, v in self.lang_code_to_id.items()}
A =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
A ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
A =list(self.lang_code_to_id.keys() )
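        # Worked example of the alignment table above: spm assigns "," the id 3;
        # adding fairseq_offset (1) maps it to fairseq id 4, as in the table.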
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
A =src_lang if src_lang is not None else "en_XX"
A =self.lang_code_to_id[self._src_lang]
A =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : int ):
"""simple docstring"""
A =self.__dict__.copy()
A =None
A =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
A =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A ={}
A =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _a ( self : Any ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _a ( self : int ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _a ( self : Any , snake_case__ : str ):
"""simple docstring"""
A =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self : Dict , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
A =[1] * len(self.prefix_tokens )
A =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case__ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case__ )) + ([0] * len(snake_case__ )) + suffix_ones
def _a ( self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self : Dict , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
A =[self.sep_token_id]
A =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : Dict , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] , snake_case__ : Optional[str] , **snake_case__ : Optional[int] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
A =src_lang
A =self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
A =self.convert_tokens_to_ids(snake_case__ )
A =tgt_lang_id
return inputs
def _a ( self : str ):
"""simple docstring"""
A ={self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _a ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A =self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _a ( self : int , snake_case__ : str ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (sub-words) back into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def _a ( self : int , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
A =self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
def _a ( self : Any ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
A =self.lang_code_to_id[src_lang]
A =[]
A =[self.eos_token_id, self.cur_lang_code]
def _a ( self : List[Any] , snake_case__ : str ):
"""simple docstring"""
A =self.lang_code_to_id[lang]
A =[]
A =[self.eos_token_id, self.cur_lang_code]
| 689 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize result
    answer = []
    # Traverse the denominations from largest to smallest (greedy choice)
    for denomination in reversed(denominations):
        # Take this denomination as many times as it still fits
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" list
    return answer
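# Worked example (hand-checked) with the default Indian denominations used below:
# find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]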
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 689 | 1 |
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
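# Hand-traced check of the recurrence above: solution(21) returns 85
# (the loop stops once numerator exceeds 2 * 21 - 1 = 41, leaving denominator = 169,
# and (169 + 1) // 2 == 85).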
| 689 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
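# Example invocation (the script filename and output path are illustrative):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu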
| 689 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
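# Typical shell usage of the CLI assembled above (train.py is a placeholder script name):
#   accelerate config
#   accelerate launch train.py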
| 689 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    # Two locks on the same path: the second acquire must time out.
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filenames(tmpdir):
    # Overlong lock names must be shortened to a valid basename.
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
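# Minimal usage sketch of the FileLock API exercised above (the path is illustrative):
# lock = FileLock("/tmp/demo.lock")
# with lock.acquire(timeout=1):  # raises Timeout if the lock is not acquired within 1s
#     ...  # critical section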
| 689 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__a = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__(self, **kwargs):
        """Map deprecated "no_*" arguments to their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 689 | 1 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        # [-1, 1] NCHW float tensors -> [0, 255] NHWC numpy arrays
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        # back to [-1, 1] NCHW tensors
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
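# Usage sketch (tensor shape and value range are illustrative: NCHW images in [-1, 1]):
# watermarker = StableDiffusionXLWatermarker()
# watermarked = watermarker.apply_watermark(torch.rand(1, 3, 512, 512) * 2 - 1)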
| 689 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__a = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__a = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__a = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 689 | 1 |
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
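# Worked example (values chosen for illustration):
# kinetic_energy(10, 10) == 0.5 * 10 * 10 * 10 == 500.0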
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 689 | 1 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a truthy/falsy string to 1 or 0; raise ValueError otherwise."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def UpperCamelCase_ ( a_ ) ->List[str]:
if is_torch_fx_proxy(a_ ):
return True
if is_torch_available():
import torch
if isinstance(a_ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(a_ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(a_ , (jnp.ndarray, Tracer) ):
return True
return isinstance(a_ , np.ndarray )
def UpperCamelCase_ ( a_ ) ->List[Any]:
return isinstance(a_ , np.ndarray )
def UpperCamelCase_ ( a_ ) ->List[str]:
return _is_numpy(a_ )
def UpperCamelCase_ ( a_ ) ->Optional[int]:
import torch
return isinstance(a_ , torch.Tensor )
def UpperCamelCase_ ( a_ ) ->int:
return False if not is_torch_available() else _is_torch(a_ )
def UpperCamelCase_ ( a_ ) ->str:
import torch
return isinstance(a_ , torch.device )
def UpperCamelCase_ ( a_ ) ->int:
return False if not is_torch_available() else _is_torch_device(a_ )
def UpperCamelCase_ ( a_ ) ->Optional[Any]:
import torch
if isinstance(a_ , a_ ):
if hasattr(a_ , a_ ):
A =getattr(a_ , a_ )
else:
return False
return isinstance(a_ , torch.dtype )
def UpperCamelCase_ ( a_ ) ->Dict:
return False if not is_torch_available() else _is_torch_dtype(a_ )
def UpperCamelCase_ ( a_ ) ->Any:
import tensorflow as tf
return isinstance(a_ , tf.Tensor )
def UpperCamelCase_ ( a_ ) ->List[str]:
return False if not is_tf_available() else _is_tensorflow(a_ )
def UpperCamelCase_ ( a_ ) ->Any:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(a_ , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(a_ )
return type(a_ ) == tf.Tensor
def UpperCamelCase_ ( a_ ) ->List[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(a_ )
def UpperCamelCase_ ( a_ ) ->Optional[Any]:
import jax.numpy as jnp # noqa: F811
return isinstance(a_ , jnp.ndarray )
def UpperCamelCase_ ( a_ ) ->Any:
return False if not is_flax_available() else _is_jax(a_ )
def UpperCamelCase_ ( a_ ) ->List[Any]:
if isinstance(a_ , (dict, UserDict) ):
return {k: to_py_obj(a_ ) for k, v in obj.items()}
elif isinstance(a_ , (list, tuple) ):
return [to_py_obj(a_ ) for o in obj]
elif is_tf_tensor(a_ ):
return obj.numpy().tolist()
elif is_torch_tensor(a_ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(a_ ):
return np.asarray(a_ ).tolist()
elif isinstance(a_ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def UpperCamelCase_ ( a_ ) ->List[str]:
if isinstance(a_ , (dict, UserDict) ):
return {k: to_numpy(a_ ) for k, v in obj.items()}
elif isinstance(a_ , (list, tuple) ):
return np.array(a_ )
elif is_tf_tensor(a_ ):
return obj.numpy()
elif is_torch_tensor(a_ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(a_ ):
return np.asarray(a_ )
else:
return obj
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =fields(self )
# Safety and consistency checks
if not len(snake_case__ ):
raise ValueError(f'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' )
A =getattr(self , class_fields[0].name )
A =all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(snake_case__ ):
if isinstance(snake_case__ , snake_case__ ):
A =first_field.items()
A =True
else:
try:
A =iter(snake_case__ )
A =True
except TypeError:
A =False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(snake_case__ ):
if (
not isinstance(snake_case__ , (list, tuple) )
or not len(snake_case__ ) == 2
or not isinstance(element[0] , snake_case__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
A =first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
A =element[1]
elif first_field is not None:
A =first_field
else:
for field in class_fields:
A =getattr(self , field.name )
if v is not None:
A =v
def __delitem__( self : int , *snake_case__ : int , **snake_case__ : List[str] ):
"""simple docstring"""
raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def _a ( self : int , *snake_case__ : int , **snake_case__ : List[str] ):
"""simple docstring"""
raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def _a ( self : Any , *snake_case__ : Optional[Any] , **snake_case__ : List[Any] ):
"""simple docstring"""
raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def _a ( self : List[str] , *snake_case__ : str , **snake_case__ : List[Any] ):
"""simple docstring"""
raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
if isinstance(snake_case__ , snake_case__ ):
A =dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(snake_case__ , snake_case__ )
super().__setattr__(snake_case__ , snake_case__ )
def __setitem__( self : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] ):
"""simple docstring"""
super().__setitem__(snake_case__ , snake_case__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(snake_case__ , snake_case__ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )
class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument."""

    PT = "pt"
    TF = "tf"
    NP = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a list of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def UpperCamelCase_ ( a_ ) ->Dict:
A =infer_framework(a_ )
if framework == "tf":
A =inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
A =inspect.signature(model_class.forward ) # PyTorch models
else:
A =inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def UpperCamelCase_ ( a_ ) ->Optional[Any]:
A =model_class.__name__
A =infer_framework(a_ )
if framework == "tf":
A =inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
A =inspect.signature(model_class.forward ) # PyTorch models
else:
A =inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
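# Example: flatten_dict({"a": {"b": 1}, "c": 2}) -> {"a.b": 1, "c": 2}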
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def UpperCamelCase_ ( a_ , a_=None ) ->Union[str, Any]:
if is_numpy_array(a_ ):
return np.transpose(a_ , axes=a_ )
elif is_torch_tensor(a_ ):
return array.T if axes is None else array.permute(*a_ )
elif is_tf_tensor(a_ ):
import tensorflow as tf
return tf.transpose(a_ , perm=a_ )
elif is_jax_tensor(a_ ):
return jnp.transpose(a_ , axes=a_ )
else:
raise ValueError(f'''Type not supported for transpose: {type(a_ )}.''' )
def UpperCamelCase_ ( a_ , a_ ) ->Optional[Any]:
if is_numpy_array(a_ ):
return np.reshape(a_ , a_ )
elif is_torch_tensor(a_ ):
return array.reshape(*a_ )
elif is_tf_tensor(a_ ):
import tensorflow as tf
return tf.reshape(a_ , a_ )
elif is_jax_tensor(a_ ):
return jnp.reshape(a_ , a_ )
else:
raise ValueError(f'''Type not supported for reshape: {type(a_ )}.''' )
def UpperCamelCase_ ( a_ , a_=None ) ->List[Any]:
if is_numpy_array(a_ ):
return np.squeeze(a_ , axis=a_ )
elif is_torch_tensor(a_ ):
return array.squeeze() if axis is None else array.squeeze(dim=a_ )
elif is_tf_tensor(a_ ):
import tensorflow as tf
return tf.squeeze(a_ , axis=a_ )
elif is_jax_tensor(a_ ):
return jnp.squeeze(a_ , axis=a_ )
else:
raise ValueError(f'''Type not supported for squeeze: {type(a_ )}.''' )
def UpperCamelCase_ ( a_ , a_ ) ->List[Any]:
if is_numpy_array(a_ ):
return np.expand_dims(a_ , a_ )
elif is_torch_tensor(a_ ):
return array.unsqueeze(dim=a_ )
elif is_tf_tensor(a_ ):
import tensorflow as tf
return tf.expand_dims(a_ , axis=a_ )
elif is_jax_tensor(a_ ):
return jnp.expand_dims(a_ , axis=a_ )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(a_ )}.''' )
def UpperCamelCase_ ( a_ ) ->Any:
if is_numpy_array(a_ ):
return np.size(a_ )
elif is_torch_tensor(a_ ):
return array.numel()
elif is_tf_tensor(a_ ):
import tensorflow as tf
return tf.size(a_ )
elif is_jax_tensor(a_ ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(a_ )}.''' )
def UpperCamelCase_ ( a_ , a_ ) ->Any:
for key, value in auto_map.items():
if isinstance(a_ , (tuple, list) ):
A =[f'''{repo_id}--{v}''' if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
A =f'''{repo_id}--{value}'''
return auto_map
def UpperCamelCase_ ( a_ ) ->str:
for base_class in inspect.getmro(a_ ):
A =base_class.__module__
A =base_class.__name__
if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 689 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
"""simple docstring"""
A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
A =None
if self.use_input_lengths:
A =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A =None
if self.use_token_type_ids:
A =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A =None
A =None
A =None
if self.use_labels:
A =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
A =ids_tensor([self.batch_size] , self.num_choices )
A =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
A =TFFlaubertModel(config=snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
A =[input_ids, input_mask]
A =model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , ):
"""simple docstring"""
A =TFFlaubertWithLMHeadModel(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        A =TFFlaubertForQuestionAnsweringSimple(config )
        A ={"input_ids": input_ids, "lengths": input_lengths}
        A =model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        A =TFFlaubertForSequenceClassification(config )
        A ={"input_ids": input_ids, "lengths": input_lengths}
        A =model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_for_token_classification( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        A =self.num_labels
        A =TFFlaubertForTokenClassification(config=config )
        A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        A =model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        A =self.num_choices
        A =TFFlaubertForMultipleChoice(config=config )
        A =tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        A =tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        A =tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        A ={
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        A =model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self : Any ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) =config_and_inputs
A ={
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_A = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self : Optional[int] ):
"""simple docstring"""
A =TFFlaubertModelTester(self )
        A =ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def _a ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : str ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A =TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A =TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
A =tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
        A =model(input_ids )[0]
        A =tf.TensorShape((1, 8, 5_12) )
        self.assertEqual(output.shape , expected_shape )
# compare the actual values for a slice.
A =tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 689 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_fnet"""] = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_fnet_fast"""] = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_fnet"""] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    __a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
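

# Note (added for clarity; describes _LazyModule semantics, not code from this
# file): with the lazy indirection above, importing the package is cheap, and a
# heavy torch-backed symbol such as FNetModel is only really imported the first
# time the attribute is accessed.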
| 689 |
from __future__ import annotations
def generate_all_permutations( a_ ) ->None:
    create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] )
def create_state_space_tree( sequence , current_sequence , index , index_used ) ->None:
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] =True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] =False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
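
# Illustrative cross-check (added; not part of the original script): a sequence
# of n distinct items has n! permutations, so the two calls above print
# 4! = 24 and 3! = 6 lines respectively.
from itertools import permutations

assert len(list(permutations(sequence))) == 24
assert len(list(permutations(sequence_a))) == 6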
| 689 | 1 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def UpperCamelCase_ ( ) ->Union[str, Any]:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
A ="__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , a_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def UpperCamelCase_ ( ) ->Any:
assert _test_patching.open is open
A ="__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , a_ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def UpperCamelCase_ ( ) ->Union[str, Any]:
# pandas.read_csv is not present in _test_patching
A ="__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , a_ ):
pass
def UpperCamelCase_ ( ) ->str:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
A ="__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , a_ ) is None
with patch_submodule(_test_patching , "len" , a_ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def UpperCamelCase_ ( ) ->Optional[Any]:
A ="__test_patch_submodule_start_and_stop_mock__"
A =patch_submodule(_test_patching , "open" , a_ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def UpperCamelCase_ ( ) ->Dict:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
A ="__test_patch_submodule_successive_join__"
A ="__test_patch_submodule_successive_dirname__"
A ="__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , a_ ):
with patch_submodule(_test_patching , "os.rename" , a_ ):
with patch_submodule(_test_patching , "os.path.dirname" , a_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , a_ ):
with patch_submodule(_test_patching , "os.path.join" , a_ ):
with patch_submodule(_test_patching , "os.path.dirname" , a_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def UpperCamelCase_ ( ) ->Union[str, Any]:
A ="__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , a_ ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , a_ ):
pass
| 689 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
    def dummy_unet_condition( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
    def dummy_vqvae_and_unet( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
A =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def _a ( self : int ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A =DDPMScheduler()
        A =AudioDiffusionPipeline(vqvae=None , unet=self.dummy_unet , mel=mel , scheduler=scheduler )
        A =pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        A =torch.Generator(device=device ).manual_seed(42 )
        A =pipe(generator=generator , steps=4 )
        A =output.audios[0]
        A =output.images[0]
        A =torch.Generator(device=device ).manual_seed(42 )
        A =pipe(generator=generator , steps=4 , return_dict=False )
A =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A =DDIMScheduler()
A =self.dummy_vqvae_and_unet
A =AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=mel , scheduler=scheduler )
        A =pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
np.random.seed(0 )
A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        A =torch.Generator(device=device ).manual_seed(42 )
        A =pipe(raw_audio=raw_audio , generator=generator , start_step=5 , steps=10 )
A =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A =self.dummy_unet_condition
A =AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_unet_condition , mel=mel , scheduler=scheduler )
        A =pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
np.random.seed(0 )
A =torch.rand((1, 1, 10) )
        A =pipe(generator=generator , encoding=encoding )
A =output.images[0]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =torch_device
A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
        A =pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        A =torch.Generator(device=device ).manual_seed(42 )
        A =pipe(generator=generator )
A =output.audios[0]
A =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 689 | 1 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
__a = logging.get_logger(__name__)
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = CLIPConfig
_A = ["CLIPEncoderLayer"]
def __init__( self : Optional[Any] , snake_case__ : CLIPConfig ):
"""simple docstring"""
super().__init__(snake_case__ )
A =CLIPVisionModelWithProjection(config.vision_config )
A =nn.Linear(config.vision_config.projection_dim , 1 )
A =nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
    def _a ( self : Union[str, Any] , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        """simple docstring"""
        A =self.vision_model(clip_input )[0]
A =self.p_head(snake_case__ )
A =nsfw_detected.flatten()
A =nsfw_detected > p_threshold
A =nsfw_detected.tolist()
if any(snake_case__ ):
logger.warning(
"Potential NSFW content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, nsfw_detected_ in enumerate(snake_case__ ):
if nsfw_detected_:
A =np.zeros(images[idx].shape )
A =self.w_head(snake_case__ )
A =watermark_detected.flatten()
A =watermark_detected > w_threshold
A =watermark_detected.tolist()
if any(snake_case__ ):
logger.warning(
"Potential watermarked content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, watermark_detected_ in enumerate(snake_case__ ):
if watermark_detected_:
A =np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
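

# Note (added; summarizes the behavior of the checker above): the two linear
# heads score NSFW and watermark likelihood per image, and any score above
# p_threshold / w_threshold (both default 0.5) replaces that image with a
# black (all-zero) array of the same shape.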
| 689 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A =find_backend(" if not is_torch_available():" )
        self.assertEqual(simple_backend , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A =find_backend(" if not (is_torch_available() and is_transformers_available()):" )
        self.assertEqual(double_backend , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A =find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
        self.assertEqual(triple_backend , "torch_and_transformers_and_onnx" )
def _a ( self : List[Any] ):
"""simple docstring"""
A =read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , objects )
        self.assertIn("torch_and_transformers" , objects )
        self.assertIn("flax_and_transformers" , objects )
        self.assertIn("torch_and_transformers_and_onnx" , objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def _a ( self : Dict ):
"""simple docstring"""
A =create_dummy_object("CONSTANT" , "'torch'" )
        self.assertEqual(dummy_constant , "\nCONSTANT = None\n" )
A =create_dummy_object("function" , "'torch'" )
self.assertEqual(
            dummy_function , "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" )
A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
A =create_dummy_object("FakeClass" , "'torch'" )
        self.assertEqual(dummy_class , expected_dummy_class )
def _a ( self : Tuple ):
"""simple docstring"""
A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] , expected_dummy_pytorch_file )
| 689 | 1 |
from importlib import import_module
from .logging import get_logger
__a = get_logger(__name__)
class UpperCamelCase__:
"""simple docstring"""
    def __init__( self : Union[str, Any] , module , attrs=None ):
        """simple docstring"""
        A =attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__" ):
                    setattr(self , key , getattr(module , key ) )
        A =module._original_module if isinstance(module , _PatchedModuleObj ) else module
class UpperCamelCase__:
"""simple docstring"""
_A = []
    def __init__( self : Dict , obj , target , new , attrs=None ):
"""simple docstring"""
A =obj
A =target
A =new
A =target.split("." )[0]
A ={}
A =attrs or []
def __enter__( self : int ):
"""simple docstring"""
*A , A =self.target.split("." )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
try:
A =import_module(".".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                A =getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    A =obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    A =getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        A =getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
                A =getattr(import_module(".".join(submodules ) ) , target_attr )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    A =getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            A =globals()["__builtins__"][target_attr]
            setattr(self.obj , target_attr , self.new )
else:
raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self : Any , *snake_case__ : Tuple ):
"""simple docstring"""
for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
def _a ( self : Dict ):
"""simple docstring"""
self.__enter__()
self._active_patches.append(self )
def _a ( self : Optional[int] ):
"""simple docstring"""
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 689 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__:
"""simple docstring"""
    data: int
    left: UpperCamelCase__ | None = None
    right: UpperCamelCase__ | None = None
__a = namedtuple("""CoinsDistribResult""", """moves excess""")
def UpperCamelCase_ ( a_ ) ->int:
if root is None:
return 0
# Validation
def count_nodes(a_ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(a_ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(a_ ) != count_coins(a_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(a_ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
A , A =get_distrib(node.left )
A , A =get_distrib(node.right )
A =1 - left_distrib_excess
A =1 - right_distrib_excess
A =(
left_distrib_moves
+ right_distrib_moves
+ abs(a_ )
+ abs(a_ )
)
A =node.data - coins_to_left - coins_to_right
return CoinsDistribResult(a_ , a_ )
return get_distrib(a_ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
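
    # Illustrative check (added; uses the dataclass above as the tree node):
    # a root holding 3 coins with two empty leaves needs exactly 2 moves to
    # leave one coin on every node.
    example_root = UpperCamelCase__(3, UpperCamelCase__(0), UpperCamelCase__(0))
    assert UpperCamelCase_(example_root) == 2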
| 689 | 1 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
"""simple docstring"""
def __init__( self : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
if dst_width < 0 or dst_height < 0:
raise ValueError("Destination width/height should be > 0" )
A =img
A =img.shape[1]
A =img.shape[0]
A =dst_width
A =dst_height
A =self.src_w / self.dst_w
A =self.src_h / self.dst_h
A =A =(
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 2_55
)
def _a ( self : Optional[int] ):
"""simple docstring"""
for i in range(self.dst_h ):
for j in range(self.dst_w ):
A =self.img[self.get_y(snake_case__ )][self.get_x(snake_case__ )]
def _a ( self : Optional[Any] , snake_case__ : int ):
"""simple docstring"""
return int(self.ratio_x * x )
def _a ( self : Optional[int] , snake_case__ : int ):
"""simple docstring"""
return int(self.ratio_y * y )
if __name__ == "__main__":
__a , __a = 8_0_0, 6_0_0
__a = imread("""image_data/lena.jpg""", 1)
__a = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
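
    # Vectorized sketch (added; an equivalent nearest-neighbour mapping using
    # numpy fancy indexing instead of the explicit double loop above):
    #   rows = (np.arange(dst_h) * im.shape[0] / dst_h).astype(int)
    #   cols = (np.arange(dst_w) * im.shape[1] / dst_w).astype(int)
    #   resized = im[rows[:, None], cols]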
| 689 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"""vocab_file""": """vocab.txt"""}
__a = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__a = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
def load_vocab( a_ ) ->List[Any]:
A =collections.OrderedDict()
with open(a_ , "r" , encoding="utf-8" ) as reader:
A =reader.readlines()
        for index, token in enumerate(tokens ):
A =token.rstrip("\n" )
A =index
return vocab
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
    def __init__( self : Tuple , vocab , unk_token="<unk>" , max_input_chars_per_word=2_00 ):
"""simple docstring"""
A =vocab
A =unk_token
A =max_input_chars_per_word
def _a ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
A =list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
A =0
A =[]
while start < len(snake_case__ ):
A =len(snake_case__ )
A =None
while start < end:
A ="".join(chars[start:end] )
if substr in self.vocab:
A =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
                sub_tokens.append(cur_substr )
A =end
return sub_tokens
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ["input_ids", "attention_mask"]
_A = False
    def __init__( self : Dict , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        """simple docstring"""
        requires_backends(self , ["jieba"] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        A =bod_token
        A =eod_token
        A =load_vocab(vocab_file )
A =self.encoder[space_token]
A =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
A ={v: k for k, v in self.encoder.items()}
A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _a ( self : Dict ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def _a ( self : Any ):
"""simple docstring"""
return self.encoder["\n"]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : Tuple , snake_case__ : int ):
"""simple docstring"""
A =[]
        for x in jieba.cut(snake_case__ , cut_all=False ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
return output_tokens
    def _decode( self : List[Any] , token_ids , **kwargs ):
"""simple docstring"""
A =[i for i in token_ids if i >= 0]
A =[
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
        return super()._decode(token_ids , **kwargs )
    def _a ( self : List[Any] , token ):
"""simple docstring"""
return token in self.encoder
def _a ( self : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
return "".join(snake_case__ )
def _a ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Dict , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
    def save_vocabulary( self : Optional[int] , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        if os.path.isdir(save_directory ):
            A =os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
A =(filename_prefix + "-" if filename_prefix else "") + save_directory
A =0
if " " in self.encoder:
A =self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
A =self.encoder["\n"]
del self.encoder["\n"]
        A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
A =token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ ))
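

# Usage sketch (added, as comments; upstream this class is CpmAntTokenizer and
# the checkpoint below is the one referenced in the vocab map above):
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   ids = tokenizer.encode("今天天气真好!")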
| 689 | 1 |
def xnor_gate( input_a , input_b ) ->int:
    return 1 if input_a == input_b else 0
def UpperCamelCase_ ( ) ->None:
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
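    # Equivalence check (added): XNOR is the negation of XOR on single bits.
    assert all(xnor_gate(a, b) == int(not (a ^ b)) for a in (0, 1) for b in (0, 1))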
| 689 |
def solution( a_ = 6008_5147_5143 ) ->int:
try:
A =int(a_ )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
A =2
A =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
A =i
while n % i == 0:
A =n // i
i += 1
return int(a_ )
if __name__ == "__main__":
print(F'''{solution() = }''')
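    # Quick illustrative checks (added): 13195 = 5 * 7 * 13 * 29, and the
    # default Project Euler input 600851475143 has largest prime factor 6857.
    assert solution(13_195) == 29
    assert solution() == 6_857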
| 689 | 1 |
from collections import deque
def tarjan( a_ ) ->List[str]:
A =len(a_ )
A =deque()
A =[False for _ in range(a_ )]
A =[-1 for _ in range(a_ )]
A =index_of[:]
    def strong_connect(v , index , components ):
A =index # the number when this node is seen
A =index # lowest rank node reachable from here
index += 1
        stack.append(v )
A =True
for w in g[v]:
if index_of[w] == -1:
                A =strong_connect(w , index , components )
A =(
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
A =(
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
A =[]
A =stack.pop()
A =False
            component.append(w )
while w != v:
A =stack.pop()
A =False
                component.append(w )
            components.append(component )
return index
A =[]
for v in range(a_ ):
if index_of[v] == -1:
            strong_connect(v , 0 , components )
return components
def create_graph( n_vertices , edges ) ->Dict:
    A =[[] for _ in range(n_vertices )]
    for u, v in edges:
        g[u].append(v )
return g
if __name__ == "__main__":
# Test
__a = 7
__a = [0, 0, 1, 2, 3, 3, 4, 4, 6]
__a = [1, 3, 2, 0, 1, 4, 5, 6, 5]
__a = [(u, v) for u, v in zip(source, target)]
__a = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 689 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "Wav2Vec2FeatureExtractor"
_A = "AutoTokenizer"
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
A =self.feature_extractor
A =False
@classmethod
    def from_pretrained( cls : List[str] , pretrained_model_name_or_path , **kwargs ):
"""simple docstring"""
try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , snake_case__ , )
        A =WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
        A =WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
        return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__( self : Optional[Any] , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
A =kwargs.pop("raw_speech" )
else:
A =kwargs.pop("audio" , snake_case__ )
A =kwargs.pop("sampling_rate" , snake_case__ )
A =kwargs.pop("text" , snake_case__ )
        if len(args ) > 0:
A =args[0]
A =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
            A =self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
if text is not None:
            A =self.tokenizer(text , **kwargs )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A =encodings["input_ids"]
return inputs
    def pad( self : Tuple , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
A =kwargs.pop("input_features" , snake_case__ )
A =kwargs.pop("labels" , snake_case__ )
        if len(args ) > 0:
A =args[0]
A =args[1:]
if input_features is not None:
            A =self.feature_extractor.pad(input_features , *args , **kwargs )
if labels is not None:
            A =self.tokenizer.pad(labels , **kwargs )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A =labels["input_ids"]
return input_features
    def batch_decode( self : List[str] , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : List[str] , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
    def as_target_processor( self : int ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
A =True
A =self.tokenizer
yield
A =self.feature_extractor
A =False
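

# Usage sketch (added, as comments; upstream this class is Wav2Vec2Processor):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=raw_speech, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text=transcription).input_ids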
| 689 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
__a = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__:
"""simple docstring"""
_A = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
_A = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
_A = field(
default=1_0_2_4 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
_A = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
_A = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_A = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
_A = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "A csv or a json file containing the training data."} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "A csv or a json file containing the validation data."} )
_A = field(default=lowerCAmelCase__ , metadata={"help": "A csv or a json file containing the test data."} )
def _a ( self : str ):
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." )
else:
A =self.train_file.split("." )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
A =self.validation_file.split("." )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class UpperCamelCase__:
"""simple docstring"""
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_A = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_A = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def main( ) ->List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A =parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
A =training_args.get_process_log_level()
logger.setLevel(a_ )
datasets.utils.logging.set_verbosity(a_ )
transformers.utils.logging.set_verbosity(a_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
A =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
A ={"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
A =data_args.train_file.split("." )[-1]
A =data_args.test_file.split("." )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
A =data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(f'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
A =load_dataset("csv" , data_files=a_ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
A =load_dataset("json" , data_files=a_ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
A =raw_datasets["train"].features["label"].names
A =len(a_ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
A =TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=a_ , )
A =BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=a_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
A ="max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
A =False
# Some models have set the order of the labels to use, so let's make sure we do use it.
A ={"Refused": 0, "Entailed": 1}
A ={0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
A =min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples ):
# Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
A =[_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
A =pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
A =examples["statement"]
A =list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
        A =tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
A =examples["label"]
return result
with training_args.main_process_first(desc="dataset map pre-processing" ):
A =raw_datasets.map(
a_ , batched=a_ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A =raw_datasets["train"]
if data_args.max_train_samples is not None:
A =train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A =raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A =eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
A =raw_datasets["test"]
if data_args.max_predict_samples is not None:
A =predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(a_ ) ) , 3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        A =p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        A =np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
A =default_data_collator
elif training_args.fpaa:
A =DataCollatorWithPadding(a_ , pad_to_multiple_of=8 )
else:
A =None
# Initialize our Trainer
A =Trainer(
model=a_ , args=a_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=a_ , tokenizer=a_ , data_collator=a_ , )
# Training
if training_args.do_train:
A =None
if training_args.resume_from_checkpoint is not None:
A =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A =last_checkpoint
A =trainer.train(resume_from_checkpoint=a_ )
A =train_result.metrics
A =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(a_ )
)
A =min(a_ , len(a_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , a_ )
trainer.save_metrics("train" , a_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A =trainer.evaluate(eval_dataset=a_ )
A =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a_ )
A =min(a_ , len(a_ ) )
trainer.log_metrics("eval" , a_ )
trainer.save_metrics("eval" , a_ )
if training_args.do_predict:
logger.info("*** Predict ***" )
# Remove the `label` column because it contains -1 and the Trainer won't like that.
A =predict_dataset.remove_columns("label" )
A =trainer.predict(a_ , metric_key_prefix="predict" ).predictions
A =np.argmax(a_ , axis=1 )
A =os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
if trainer.is_world_process_zero():
with open(a_ , "w" ) as writer:
logger.info("***** Predict Results *****" )
writer.write("index\tprediction\n" )
for index, item in enumerate(a_ ):
A =label_list[item]
writer.write(f'''{index}\t{item}\n''' )
A ={"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**a_ )
else:
trainer.create_model_card(**a_ )
def UpperCamelCase_ ( a_ ) ->List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 689 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 1 |
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__a = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 2048-bit
1_4: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 3072-bit
1_5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 4096-bit
1_6: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 6144-bit
1_7: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
# 8192-bit
1_8: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=1_6,
),
"""generator""": 2,
},
}
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : Optional[Any] , snake_case__ : int = 14 ):
"""simple docstring"""
if group not in primes:
raise ValueError("Unsupported Group" )
A =primes[group]["prime"]
A =primes[group]["generator"]
A =int(hexlify(urandom(32 ) ) , base=16 )
def _a ( self : Optional[int] ):
"""simple docstring"""
return hex(self.__private_key )[2:]
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =pow(self.generator , self.__private_key , self.prime )
return hex(snake_case__ )[2:]
def _a ( self : Optional[Any] , snake_case__ : int ):
"""simple docstring"""
return (
2 <= key <= self.prime - 2
and pow(snake_case__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def _a ( self : Tuple , snake_case__ : str ):
"""simple docstring"""
A =int(snake_case__ , base=16 )
if not self.is_valid_public_key(snake_case__ ):
raise ValueError("Invalid public key" )
A =pow(snake_case__ , self.__private_key , self.prime )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
@staticmethod
def _a ( snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
return (
2 <= remote_public_key_str <= prime - 2
and pow(snake_case__ , (prime - 1) // 2 , snake_case__ ) == 1
)
@staticmethod
def _a ( snake_case__ : str , snake_case__ : str , snake_case__ : int = 14 ):
"""simple docstring"""
A =int(snake_case__ , base=16 )
A =int(snake_case__ , base=16 )
A =primes[group]["prime"]
if not DiffieHellman.is_valid_public_key_static(snake_case__ , snake_case__ ):
raise ValueError("Invalid public key" )
A =pow(snake_case__ , snake_case__ , snake_case__ )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
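# Added illustration (not part of the original module): the bare exchange the
# class above wraps, run on a deliberately tiny, insecure group so it executes
# instantly at import time. Real exchanges must use the RFC 3526 groups above.
_demo_prime, _demo_generator = 23, 5 # toy parameters, never use in practice
_demo_secret_a, _demo_secret_b = 6, 15 # each party's private exponent
_demo_public_a = pow(_demo_generator, _demo_secret_a, _demo_prime) # g^a mod p
_demo_public_b = pow(_demo_generator, _demo_secret_b, _demo_prime) # g^b mod p
# both parties independently arrive at g^(a*b) mod p
assert pow(_demo_public_b, _demo_secret_a, _demo_prime) == pow(_demo_public_a, _demo_secret_b, _demo_prime)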
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the fork point and the files modified since then - i.e. files not tracked by git won't be considered
# since the output of this script is fed into Makefile commands, it doesn't print a newline after the results
import re
import subprocess
import sys
__a = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__a = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
__a = """|""".join(sys.argv[1:])
__a = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__a = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 689 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
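# Added note: `_LazyModule` defers the real imports -- attributes declared in
# `_import_structure` are only resolved on first access, so importing this
# package stays cheap even with the many optional backends declared above.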
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 1 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def UpperCamelCase_ ( a_ ) ->List[str]:
if isinstance(a_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class UpperCamelCase__:
"""simple docstring"""
def _a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
pass
def _a ( self : List[str] ):
"""simple docstring"""
pass
def _a ( self : List[str] ):
"""simple docstring"""
pass
def _a ( self : List[str] , snake_case__ : np.ndarray , snake_case__ : np.ndarray , snake_case__ : float ):
"""simple docstring"""
A =np.abs((a - b) ).max()
self.assertLessEqual(snake_case__ , snake_case__ , f'''Difference between torch and flax is {diff} (>= {tol}).''' )
def _a ( self : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[str]=None , **snake_case__ : Dict ):
"""simple docstring"""
A =VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
A =FlaxVisionTextDualEncoderModel(snake_case__ )
A =model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def _a ( self : Any , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any]=None , **snake_case__ : Optional[int] ):
"""simple docstring"""
A , A =self.get_vision_text_model(snake_case__ , snake_case__ )
A ={"vision_model": vision_model, "text_model": text_model}
A =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
A =model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : int=None , **snake_case__ : str ):
"""simple docstring"""
A , A =self.get_vision_text_model(snake_case__ , snake_case__ )
A ={"vision_model": vision_model, "text_model": text_model}
A =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
A =model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
A =output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
A =FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
A =model(input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ )
A =after_output[0]
A =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ , 1E-3 )
def _a ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Optional[Any]=None , **snake_case__ : List[str] ):
"""simple docstring"""
A , A =self.get_vision_text_model(snake_case__ , snake_case__ )
A ={"vision_model": vision_model, "text_model": text_model}
A =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
A =model(
input_ids=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , output_attentions=snake_case__ )
A =output.vision_model_output.attentions
self.assertEqual(len(snake_case__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A =to_atuple(vision_model.config.image_size )
A =to_atuple(vision_model.config.patch_size )
A =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A =num_patches + 1
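# e.g. with image_size=224 and patch_size=16 this gives (224 // 16) ** 2 + 1 == 197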
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A =output.text_model_output.attentions
self.assertEqual(len(snake_case__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : List[Any] , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
pt_model.to(snake_case__ )
pt_model.eval()
# prepare inputs
A =inputs_dict
A ={k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
A =pt_model(**snake_case__ ).to_tuple()
A =fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case__ )
A =FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ , from_pt=snake_case__ )
A =fx_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__ )
A =VisionTextDualEncoderModel.from_pretrained(snake_case__ , from_flax=snake_case__ )
pt_model_loaded.to(snake_case__ )
pt_model_loaded.eval()
with torch.no_grad():
A =pt_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(snake_case__ , pt_output_loaded.numpy() , 4E-2 )
def _a ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
A =VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
A =VisionTextDualEncoderModel(snake_case__ )
A =FlaxVisionTextDualEncoderModel(snake_case__ )
A =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , snake_case__ )
A =fx_state
self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ )
def _a ( self : List[Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
A =VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ , snake_case__ )
A =VisionTextDualEncoderModel(snake_case__ )
A =FlaxVisionTextDualEncoderModel(snake_case__ )
A =load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params )
self.check_pt_flax_equivalence(snake_case__ , snake_case__ , snake_case__ )
def _a ( self : Optional[int] ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case__ )
def _a ( self : int ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case__ )
def _a ( self : Any ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
self.check_save_load(**snake_case__ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case__ )
@is_pt_flax_cross_test
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
A =config_inputs_dict.pop("vision_config" )
A =config_inputs_dict.pop("text_config" )
A =config_inputs_dict
self.check_equivalence_pt_to_flax(snake_case__ , snake_case__ , snake_case__ )
self.check_equivalence_flax_to_pt(snake_case__ , snake_case__ , snake_case__ )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A , A =self.get_pretrained_model_and_inputs()
A =model_a(**snake_case__ )
A =outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(snake_case__ )
A =FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
A =model_a(**snake_case__ )
A =after_outputs[0]
A =np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ , 1E-5 )
@require_flax
class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
def _a ( self : Any ):
"""simple docstring"""
A =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , )
A =13
A =floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
A =ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
A =random_attention_mask([batch_size, 4] )
A ={"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : str ):
"""simple docstring"""
A =FlaxViTModel(snake_case__ )
A =FlaxBertModel(snake_case__ )
return vision_model, text_model
def _a ( self : str ):
"""simple docstring"""
A =FlaxViTModelTester(self )
A =FlaxBertModelTester(self )
A =vit_model_tester.prepare_config_and_inputs()
A =bert_model_tester.prepare_config_and_inputs()
A , A =vision_config_and_inputs
A , A , A , A =text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
A =FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=snake_case__ , text_from_pt=snake_case__ , )
A =13
A =floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
A =ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
A =random_attention_mask([batch_size, 4] )
A ={"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def _a ( self : int , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
A =FlaxCLIPVisionModel(snake_case__ )
A =FlaxBertModel(snake_case__ )
return vision_model, text_model
def _a ( self : Optional[int] ):
"""simple docstring"""
A =FlaxCLIPVisionModelTester(self )
A =FlaxBertModelTester(self )
A =clip_model_tester.prepare_config_and_inputs()
A =bert_model_tester.prepare_config_and_inputs()
A , A =vision_config_and_inputs
A , A , A , A =text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : List[str] ):
"""simple docstring"""
A =FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
A =VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
A =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A =processor(
text=["una foto di un gatto", "una foto di un cane"] , images=snake_case__ , padding=snake_case__ , return_tensors="np" )
A =model(**snake_case__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
A =np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , snake_case__ , atol=1E-3 ) )
| 689 |
def UpperCamelCase_ ( a_ , a_ ) ->int:
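# NAND outputs 0 only when both inputs are 1; equivalently it outputs 1
# whenever at least one input is 0, which is exactly the `count(0)` test below.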
return int((input_a, input_a).count(0 ) != 0 )
def UpperCamelCase_ ( ) ->None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 689 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = None
_A = None
_A = None
_A = None
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : List[str] , snake_case__ : Tuple=1 , snake_case__ : str=0 , snake_case__ : Union[str, Any]=2 , snake_case__ : Dict=5_12 , snake_case__ : Optional[Any]="cls" , snake_case__ : Union[str, Any]=False , snake_case__ : Tuple=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
A =project_dim
A =pooler_fn
A =learn_encoder
A =use_attention_mask
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [r"pooler", r"logit_scale"]
_A = [r"position_ids", r"predictions.decoder.bias"]
_A = "roberta"
_A = RobertaSeriesConfig
def __init__( self : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
super().__init__(snake_case__ )
A =XLMRobertaModel(snake_case__ )
A =nn.Linear(config.hidden_size , config.project_dim )
A =getattr(snake_case__ , "has_pre_transformation" , snake_case__ )
if self.has_pre_transformation:
A =nn.Linear(config.hidden_size , config.project_dim )
A =nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _a ( self : str , snake_case__ : Optional[torch.Tensor] = None , snake_case__ : Optional[torch.Tensor] = None , snake_case__ : Optional[torch.Tensor] = None , snake_case__ : Optional[torch.Tensor] = None , snake_case__ : Optional[torch.Tensor] = None , snake_case__ : Optional[torch.Tensor] = None , snake_case__ : Optional[torch.Tensor] = None , snake_case__ : Optional[torch.Tensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
A =return_dict if return_dict is not None else self.config.use_return_dict
A =self.base_model(
input_ids=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , position_ids=snake_case__ , head_mask=snake_case__ , inputs_embeds=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_attentions=snake_case__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=snake_case__ , )
if self.has_pre_transformation:
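# pre-transformation path: take the penultimate hidden state (index -2),
# LayerNorm it, then project it to `project_dim`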
A =outputs["hidden_states"][-2]
A =self.pre_LN(snake_case__ )
A =self.transformation_pre(snake_case__ )
return TransformationModelOutput(
projection_state=snake_case__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
A =self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=snake_case__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 689 |
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
def count_of_possible_combinations(a_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(a_ )
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
def count_of_possible_combinations_with_dp_array(
a_ , a_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
A =sum(
count_of_possible_combinations_with_dp_array(target - item , a_ )
for item in array )
A =answer
return answer
A =[-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(a_ , a_ )
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
A =[0] * (target + 1)
A =1
for i in range(1 , target + 1 ):
for j in range(a_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
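# Worked check (added): for array=[1, 2, 5] and target=5 the table fills as
# dp_array=[1, 1, 2, 3, 5, 9], i.e. 9 ordered combinations: 1+1+1+1+1, the four
# orderings of 2+1+1+1, the three orderings of 2+2+1, and 5 itself.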
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = 3
__a = 5
__a = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 689 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__a = logging.get_logger(__name__)
class UpperCamelCase__:
"""simple docstring"""
_A = 42
_A = None
@staticmethod
def _a ( ):
"""simple docstring"""
raise NotImplementedError
def _a ( self : Any , snake_case__ : Dict , snake_case__ : int , snake_case__ : str , **snake_case__ : Dict ):
"""simple docstring"""
raise NotImplementedError
def _a ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
raise NotImplementedError
def _a ( self : Optional[Any] ):
"""simple docstring"""
if not self.is_available():
raise RuntimeError(
f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def _a ( cls : List[Any] ):
"""simple docstring"""
return f'''`pip install {cls.pip_package or cls.name}`'''
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "optuna"
@staticmethod
def _a ( ):
"""simple docstring"""
return is_optuna_available()
def _a ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : str , **snake_case__ : Optional[Any] ):
"""simple docstring"""
return run_hp_search_optuna(snake_case__ , snake_case__ , snake_case__ , **snake_case__ )
def _a ( self : Any , snake_case__ : List[Any] ):
"""simple docstring"""
return default_hp_space_optuna(snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "ray"
_A = "'ray[tune]'"
@staticmethod
def _a ( ):
"""simple docstring"""
return is_ray_available()
def _a ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : str , **snake_case__ : Optional[Any] ):
"""simple docstring"""
return run_hp_search_ray(snake_case__ , snake_case__ , snake_case__ , **snake_case__ )
def _a ( self : int , snake_case__ : Any ):
"""simple docstring"""
return default_hp_space_ray(snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "sigopt"
@staticmethod
def _a ( ):
"""simple docstring"""
return is_sigopt_available()
def _a ( self : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : str , **snake_case__ : List[Any] ):
"""simple docstring"""
return run_hp_search_sigopt(snake_case__ , snake_case__ , snake_case__ , **snake_case__ )
def _a ( self : Optional[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
return default_hp_space_sigopt(snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "wandb"
@staticmethod
def _a ( ):
"""simple docstring"""
return is_wandb_available()
def _a ( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : str , **snake_case__ : int ):
"""simple docstring"""
return run_hp_search_wandb(snake_case__ , snake_case__ , snake_case__ , **snake_case__ )
def _a ( self : Optional[int] , snake_case__ : List[Any] ):
"""simple docstring"""
return default_hp_space_wandb(snake_case__ )
__a = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def UpperCamelCase_ ( ) ->str:
A =[backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(a_ ) > 0:
A =available_backends[0].name
if len(a_ ) > 1:
logger.info(
f'''{len(a_ )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
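# Added sketch of how a further backend would plug in, mirroring the four
# subclasses above; `MyBackend`, `is_mytool_available` and
# `run_hp_search_mytool` are hypothetical stand-ins, not a real API:
# class MyBackend(lowerCAmelCase__):
#     _A = "mytool"
#     @staticmethod
#     def _a ():
#         return is_mytool_available()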
| 689 |
from __future__ import annotations
import math
def UpperCamelCase_ ( a_ , a_ ) ->float:
A =u
for i in range(1 , a_ ):
A =temp * (u - i)
return temp
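# Added note: this is the falling-factorial term of Newton's forward-difference
# formula -- ucal(u, i) = u(u-1)...(u-i+1) -- used below in
# y(x) ~ y0 + sum_i ucal(u, i) * delta^i(y0) / i!  with u = (x - x0) / h.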
def UpperCamelCase_ ( ) ->None:
A =int(input("enter the numbers of values: " ) )
A =[]
for _ in range(a_ ):
y.append([] )
for i in range(a_ ):
for j in range(a_ ):
y[i].append(a_ )
A =0
print("enter the values of parameters in a list: " )
A =list(map(a_ , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(a_ ):
A =float(input() )
A =int(input("enter the value to interpolate: " ) )
A =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , a_ ):
for j in range(n - i ):
A =y[j + 1][i - 1] - y[j][i - 1]
A =y[0][0]
for i in range(1 , a_ ):
summ += (ucal(a_ , a_ ) * y[0][i]) / math.factorial(a_ )
print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
| 689 | 1 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
__a = _symbol_database.Default()
__a = _descriptor_pool.Default().AddSerializedFile(
B"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
__a = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
__a = None
__a = B"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
__a = 4_5
__a = 1_5_8_1
__a = 1_5_1_7
__a = 1_5_7_0
__a = 1_5_8_4
__a = 1_7_9_3
__a = 1_7_9_5
__a = 1_9_1_6
__a = 1_8_6_4
__a = 1_9_0_5
__a = 1_9_1_9
__a = 2_4_2_9
__a = 2_2_0_8
__a = 2_4_1_8
__a = 2_3_2_3
__a = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 689 |
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCamelCase_ ( a_ ) ->Any:
# get the image height and width
A , A =img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(a_ ):
for j in range(a_ ):
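# e.g. a BGR pixel [0, 0, 255] (pure red) maps to [255, 255, 0] (cyan)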
A =[255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__a = imread("""image_data/lena.jpg""", 1)
# convert to its negative
__a = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 689 | 1 |
from datetime import datetime as dt
import os
from github import Github
__a = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def UpperCamelCase_ ( ) ->Union[str, Any]:
A =Github(os.environ["GITHUB_TOKEN"] )
A =g.get_repo("huggingface/transformers" )
A =repo.get_issues(state="open" )
for issue in open_issues:
A =sorted([comment for comment in issue.get_comments()] , key=lambda a_ : i.created_at , reverse=a_ )
A =comments[0] if len(a_ ) > 0 else None
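# first branch: the bot's stale notice went unanswered for 7+ days on an issue
# at least 30 days old -> close it; second branch: 23+ days without activity on
# a 30+ day old issue -> post the stale notice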
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 689 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
__a = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
__a = {
"""ctrl""": 2_5_6,
}
__a = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def UpperCamelCase_ ( a_ ) ->List[str]:
A =set()
A =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A =char
A =set(a_ )
return pairs
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = CONTROL_CODES
def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ):
"""simple docstring"""
super().__init__(unk_token=snake_case__ , **snake_case__ )
with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
A =json.load(snake_case__ )
A ={v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
A =merges_handle.read().split("\n" )[1:-1]
A =[tuple(merge.split() ) for merge in merges]
A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
A ={}
@property
def _a ( self : str ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : List[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : int , snake_case__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
A =tuple(snake_case__ )
A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
A =get_pairs(snake_case__ )
if not pairs:
return token
while True:
A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
A , A =bigram
A =[]
A =0
while i < len(snake_case__ ):
try:
A =word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A =j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A =tuple(snake_case__ )
A =new_word
if len(snake_case__ ) == 1:
break
else:
A =get_pairs(snake_case__ )
A ="@@ ".join(snake_case__ )
A =word[:-4]
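# the merged subwords were joined with the "@@ " continuation marker and the
# trailing "</w>" end-of-word symbol (4 characters) stripped,
# e.g. the tuple ("hel", "lo</w>") serialises to "hel@@ lo"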
A =word
return word
def _a ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
A =[]
A =re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def _a ( self : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
A =" ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
A =0
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
A =token_index
writer.write(" ".join(snake_case__ ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 689 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Union[str, Any] , snake_case__ : int = 7_68 , ):
"""simple docstring"""
super().__init__()
A =nn.Parameter(torch.zeros(1 , snake_case__ ) )
A =nn.Parameter(torch.ones(1 , snake_case__ ) )
def _a ( self : List[Any] , snake_case__ : Optional[Union[str, torch.device]] = None , snake_case__ : Optional[torch.dtype] = None , ):
"""simple docstring"""
A =nn.Parameter(self.mean.to(snake_case__ ).to(snake_case__ ) )
A =nn.Parameter(self.std.to(snake_case__ ).to(snake_case__ ) )
return self
def _a ( self : Dict , snake_case__ : Union[str, Any] ):
"""simple docstring"""
A =(embeds - self.mean) * 1.0 / self.std
return embeds
def _a ( self : Dict , snake_case__ : str ):
"""simple docstring"""
A =(embeds * self.std) + self.mean
return embeds
| 689 |
def UpperCamelCase_ ( a_ , a_ ) ->list[int]:
A =int(a_ )
# Initialize Result
A =[]
    # Traverse through all the denominations
for denomination in reversed(a_ ):
# Find denominations
while int(a_ ) >= int(a_ ):
total_value -= int(a_ )
            answer.append(a_ ) # Append the chosen denomination to the answer list
return answer
# Driver Code
if __name__ == "__main__":
__a = []
__a = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input("""Enter the change you want to make: """).strip()
    if int(value) <= 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
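    # Worked example (illustrative addition, not in the original row): the greedy loop
    # always takes the largest denomination first, so 987 with the default Indian
    # denominations splits as verified inline below.
    _coins, _value, _change = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0], 9_8_7, []
    for _coin in reversed(_coins):
        while _value >= _coin:
            _value -= _coin
            _change.append(_coin)
    assert _change == [5_0_0, 1_0_0, 1_0_0, 1_0_0, 1_0_0, 5_0, 2_0, 1_0, 5, 2]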
| 689 | 1 |
from typing import Any
import numpy as np
def UpperCamelCase_ ( a_ ) ->bool:
return np.array_equal(a_ , matrix.conjugate().T )
def UpperCamelCase_ ( a_ , a_ ) ->Any:
A =v.conjugate().T
A =v_star.dot(a_ )
assert isinstance(a_ , np.ndarray )
return (v_star_dot.dot(a_ )) / (v_star.dot(a_ ))
def UpperCamelCase_ ( ) ->None:
A =np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
A =np.array([[1], [2], [3]] )
assert is_hermitian(a_ ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(a_ , a_ ) )
A =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(a_ ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(a_ , a_ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
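    # Quick standalone check (illustrative addition): the Rayleigh quotient of the
    # identity matrix is 1 for any nonzero vector, since v* I v / (v* v) == 1.
    _v = np.array([[1.0], [2.0], [3.0]] )
    assert np.isclose((_v.conjugate().T @ np.eye(3 ) @ _v) / (_v.conjugate().T @ _v) , 1.0 )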
| 689 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = ["""model.decoder.embed_positions.weights"""]
def UpperCamelCase_ ( a_ ) ->List[str]:
if "emb" in name:
A =name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
A =name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
A =name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
A =name.replace("linear1" , "fc1" )
if "linear2" in name:
A =name.replace("linear2" , "fc2" )
if "norm1" in name:
A =name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
A =name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
A =name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
A =name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
A =name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]:
A =list(state_dict.keys() )
A ={}
for key in keys:
A =state_dict.pop(a_ )
A =rename_keys(a_ )
if "in_proj_weight" in key:
# split fused qkv proj
A =val[:hidden_size, :]
A =val[hidden_size : 2 * hidden_size, :]
A =val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A =val
else:
A =val
return state_dict, enc_dec_proj_state_dict
def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
A =1024
A =24
A =16
elif checkpoint == "medium":
A =1536
A =48
A =24
elif checkpoint == "large":
A =2048
A =48
A =32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
A =MusicgenDecoderConfig(
hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , )
return config
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]:
A =MusicGen.get_pretrained(a_ , device=a_ )
A =decoder_config_from_checkpoint(a_ )
A =fairseq_model.lm.state_dict()
A , A =rename_state_dict(
a_ , hidden_size=decoder_config.hidden_size )
A =TaEncoderModel.from_pretrained("t5-base" )
A =EncodecModel.from_pretrained("facebook/encodec_32khz" )
A =MusicgenForCausalLM(a_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A , A =decoder.load_state_dict(a_ , strict=a_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(a_ )
if len(a_ ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(a_ ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(a_ )
# check we can do a forward pass
A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A =input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A =model(input_ids=a_ , decoder_input_ids=a_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
A =AutoTokenizer.from_pretrained("t5-base" )
A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ )
# set the appropriate bos/pad token ids
A =2048
A =2048
# set other default generation config params
A =int(30 * audio_encoder.config.frame_rate )
A =True
A =3.0
if pytorch_dump_folder is not None:
Path(a_ ).mkdir(exist_ok=a_ )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(a_ )
processor.push_to_hub(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
__a = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
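    # Example invocation (illustrative addition; the script file name is an assumption,
    # the flags are the ones the parser above defines):
    # python convert_musicgen_checkpoint.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu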
| 689 | 1 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class UpperCamelCase__:
"""simple docstring"""
_A = BlenderbotSmallConfig
_A = {}
_A = "gelu"
def __init__( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any]=13 , snake_case__ : Optional[int]=7 , snake_case__ : List[Any]=True , snake_case__ : Dict=False , snake_case__ : Union[str, Any]=99 , snake_case__ : Optional[Any]=32 , snake_case__ : str=2 , snake_case__ : Dict=4 , snake_case__ : Any=37 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Optional[Any]=20 , snake_case__ : List[str]=2 , snake_case__ : Optional[int]=1 , snake_case__ : Optional[int]=0 , ):
"""simple docstring"""
A =parent
A =batch_size
A =seq_length
A =is_training
A =use_labels
A =vocab_size
A =hidden_size
A =num_hidden_layers
A =num_attention_heads
A =intermediate_size
A =hidden_dropout_prob
A =attention_probs_dropout_prob
A =max_position_embeddings
A =eos_token_id
A =pad_token_id
A =bos_token_id
def _a ( self : List[str] ):
"""simple docstring"""
A =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A =tf.concat([input_ids, eos_tensor] , axis=1 )
A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A =prepare_blenderbot_small_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def _a ( self : Any , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
A =TFBlenderbotSmallModel(config=snake_case__ ).get_decoder()
A =inputs_dict["input_ids"]
A =input_ids[:1, :]
A =inputs_dict["attention_mask"][:1, :]
A =inputs_dict["head_mask"]
A =1
# first forward pass
A =model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ )
A , A =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A =ids_tensor((self.batch_size, 3) , config.vocab_size )
A =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention mask
A =tf.concat([input_ids, next_tokens] , axis=-1 )
A =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A =model(snake_case__ , attention_mask=snake_case__ )[0]
A =model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A =output_from_no_past[:, -3:, random_slice_idx]
A =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1E-3 )
def UpperCamelCase_ ( a_ , a_ , a_ , a_=None , a_=None , a_=None , a_=None , a_=None , ) ->Optional[Any]:
if attention_mask is None:
A =tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
_A = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
_A = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
_A = True
_A = False
_A = False
def _a ( self : Tuple ):
"""simple docstring"""
A =TFBlenderbotSmallModelTester(self )
A =ConfigTester(self , config_class=snake_case__ )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Tuple ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
_A = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
_A = "facebook/blenderbot_small-90M"
@cached_property
def _a ( self : Optional[Any] ):
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def _a ( self : str ):
"""simple docstring"""
A =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def _a ( self : List[str] ):
"""simple docstring"""
A =self.tokenizer(self.src_text , return_tensors="tf" )
A =self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=snake_case__ , )
A =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 689 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_ ( a_ ) ->Tuple:
A =FileLock(str(tmpdir / "foo.lock" ) )
A =FileLock(str(tmpdir / "foo.lock" ) )
A =0.01
with locka.acquire():
with pytest.raises(a_ ):
A =time.time()
locka.acquire(a_ )
assert time.time() - _start > timeout
def UpperCamelCase_ ( a_ ) ->List[Any]:
A ="a" * 1000 + ".lock"
A =FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(a_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A =FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(a_ ):
locka.acquire(0 )
| 689 | 1 |
import collections
import importlib.util
import os
import re
from pathlib import Path
__a = """src/transformers"""
# Matches is_xxx_available()
__a = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
__a = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__a = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
__a = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
__a = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__a = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
__a = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
__a = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
__a = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
__a = re.compile(r"""^\s*try:""")
# Catches a line with else:
__a = re.compile(r"""^\s*else:""")
def UpperCamelCase_ ( a_ ) ->Union[str, Any]:
if _re_test_backend.search(a_ ) is None:
return None
A =[b[0] for b in _re_backend.findall(a_ )]
backends.sort()
return "_and_".join(a_ )
def UpperCamelCase_ ( a_ ) ->Union[str, Any]:
with open(a_ , "r" , encoding="utf-8" , newline="\n" ) as f:
A =f.readlines()
A =0
while line_index < len(a_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(a_ ):
return None
# First grab the objects without a specific backend in _import_structure
A =[]
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
A =lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(a_ ):
A =_re_one_line_import_struct.search(a_ ).groups()[0]
            A =re.findall(r"\[([^\]]+)\]" , a_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
A =_re_import_struct_key_value.search(a_ )
if single_line_import_search is not None:
A =[obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(a_ ) > 0]
objects.extend(a_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
A ={"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
A =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
A =lines[line_index]
if _re_import_struct_add_one.search(a_ ) is not None:
objects.append(_re_import_struct_add_one.search(a_ ).groups()[0] )
elif _re_import_struct_add_many.search(a_ ) is not None:
A =_re_import_struct_add_many.search(a_ ).groups()[0].split(", " )
A =[obj[1:-1] for obj in imports if len(a_ ) > 0]
objects.extend(a_ )
elif _re_between_brackets.search(a_ ) is not None:
A =_re_between_brackets.search(a_ ).groups()[0].split(", " )
A =[obj[1:-1] for obj in imports if len(a_ ) > 0]
objects.extend(a_ )
elif _re_quote_object.search(a_ ) is not None:
objects.append(_re_quote_object.search(a_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
A =objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
A =[]
while (
line_index < len(a_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
A =lines[line_index]
A =_re_import.search(a_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
A ={"none": objects}
# Let's continue with backend-specific objects
while line_index < len(a_ ):
# If the line is an if is_backend_available, we grab all objects associated.
A =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
A =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
A =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
A =lines[line_index]
A =_re_import.search(a_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
A =objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCamelCase_ ( a_ , a_ ) ->int:
def find_duplicates(a_ ):
return [k for k, v in collections.Counter(a_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
A =[]
for key in import_dict_objects.keys():
A =find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
A =find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
A ="base imports" if key == "none" else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def UpperCamelCase_ ( ) ->Tuple:
A =[]
for root, _, files in os.walk(a_ ):
if "__init__.py" in files:
A =os.path.join(a_ , "__init__.py" )
A =parse_init(a_ )
if objects is not None:
A =analyze_results(*a_ )
if len(a_ ) > 0:
A =f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("\n".join(a_ ) )
if len(a_ ) > 0:
raise ValueError("\n\n".join(a_ ) )
def UpperCamelCase_ ( ) ->Dict:
A =[]
for path, directories, files in os.walk(a_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(a_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(a_ ) / folder).glob("*.py" ) ) ) == 0:
continue
A =str((Path(a_ ) / folder).relative_to(a_ ) )
A =short_path.replace(os.path.sep , "." )
submodules.append(a_ )
for fname in files:
if fname == "__init__.py":
continue
A =str((Path(a_ ) / fname).relative_to(a_ ) )
A =short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(a_ )
return submodules
__a = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def UpperCamelCase_ ( ) ->Any:
# This is to make sure the transformers module imported is the one in the repo.
A =importlib.util.spec_from_file_location(
"transformers" , os.path.join(a_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
A =spec.loader.load_module()
A =[
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(a_ ) > 0:
A ="\n".join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
f'''{list_of_modules}\n'''
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 1 |
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
if exponent == 1:
return base
if exponent % 2 == 0:
A =_modexpt(a_ , exponent // 2 , a_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(a_ , exponent - 1 , a_ )) % modulo_value
def UpperCamelCase_ ( a_ = 1777 , a_ = 1855 , a_ = 8 ) ->int:
A =base
for _ in range(1 , a_ ):
A =_modexpt(a_ , a_ , 10**digits )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
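    # Worked example (illustrative addition): the squaring recursion above computes
    # base**exponent mod 10**digits without materialising the full power, matching
    # Python's built-in three-argument pow -- e.g. the last two digits of 3**13:
    assert pow(3 , 13 , 10**2 ) == 23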
| 689 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__a = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__a = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__a = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
"""simple docstring"""
def _a ( self : Any ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ):
"""simple docstring"""
A =len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
A =[[refs[i] for refs in references] for i in range(snake_case__ )]
A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
A =sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 689 | 1 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCamelCase__( nn.Module ):
"""simple docstring"""
def __init__( self : int , snake_case__ : int = 16 , snake_case__ : int = 88 , snake_case__ : Optional[int] = None , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 32 , snake_case__ : Optional[int] = None , snake_case__ : bool = False , snake_case__ : Optional[int] = None , snake_case__ : Optional[int] = None , snake_case__ : str = "geglu" , snake_case__ : Optional[int] = None , ):
"""simple docstring"""
super().__init__()
A =nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=snake_case__ , attention_head_dim=snake_case__ , in_channels=snake_case__ , num_layers=snake_case__ , dropout=snake_case__ , norm_num_groups=snake_case__ , cross_attention_dim=snake_case__ , attention_bias=snake_case__ , sample_size=snake_case__ , num_vector_embeds=snake_case__ , activation_fn=snake_case__ , num_embeds_ada_norm=snake_case__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
A =0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
A =[77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
A =[1, 0]
def _a ( self : Optional[int] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[str]=None , snake_case__ : Tuple=None , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
"""simple docstring"""
A =hidden_states
A =[]
A =0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
A =encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
A =self.transformer_index_for_condition[i]
A =self.transformers[transformer_index](
snake_case__ , encoder_hidden_states=snake_case__ , timestep=snake_case__ , cross_attention_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
A =encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
A =output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=snake_case__ )
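# Note (illustrative addition): with the default mix_ratio of 0.5 the combination above
# reduces to 0.5 * (out1 - x) + 0.5 * (out2 - x) + x == 0.5 * out1 + 0.5 * out2, i.e. a
# plain average of the two transformers' outputs on the shared input x.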
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 1 |
from pathlib import Path
import numpy as np
from PIL import Image
def UpperCamelCase_ ( a_ ) ->np.ndarray:
A , A , A =rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def UpperCamelCase_ ( a_ ) ->np.ndarray:
return (gray > 127) & (gray <= 255)
def UpperCamelCase_ ( a_ , a_ ) ->np.ndarray:
A =np.zeros_like(a_ )
A =np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
A =image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
A =(
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
A =int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
__a = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
__a = np.array(Image.open(lena_path))
# kernel to be applied
__a = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
__a = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
__a = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
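    # Toy check (illustrative addition, using the call-site name `dilation` from above):
    # morphological dilation of a single centre pixel with the 3x3 cross element grows
    # it into a plus sign:
    #   dilation(np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]]), structuring_element)
    #   -> [[0, 1, 0], [1, 1, 1], [0, 1, 0]]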
| 689 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : List[str] , snake_case__ : Optional[int] , ):
"""simple docstring"""
A =parent
A =13
A =7
A =True
A =True
A =True
A =True
A =True
A =False
A =False
A =False
A =2
A =99
A =0
A =32
A =2
A =4
A =0.1
A =0.1
A =5_12
A =16
A =2
A =0.02
A =3
A =4
A ="last"
A =True
A =None
A =0
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
A =None
if self.use_input_lengths:
A =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A =None
if self.use_token_type_ids:
A =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A =None
A =None
A =None
if self.use_labels:
A =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
A =ids_tensor([self.batch_size] , self.num_choices )
A =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
A =TFFlaubertModel(config=snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
A =[input_ids, input_mask]
A =model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , ):
"""simple docstring"""
A =TFFlaubertWithLMHeadModel(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , ):
"""simple docstring"""
A =TFFlaubertForQuestionAnsweringSimple(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths}
A =model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , ):
"""simple docstring"""
A =TFFlaubertForSequenceClassification(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , ):
"""simple docstring"""
A =self.num_labels
A =TFFlaubertForTokenClassification(config=snake_case__ )
A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , ):
"""simple docstring"""
A =self.num_choices
A =TFFlaubertForMultipleChoice(config=snake_case__ )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A ={
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : Any ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
        A , A , A , A , A , A , A , A , A =config_and_inputs
A ={
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_A = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
def _a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self : Optional[int] ):
"""simple docstring"""
A =TFFlaubertModelTester(self )
A =ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def _a ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : str ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A =TFFlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A =TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
A =tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
A =model(snake_case__ )[0]
A =tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice.
A =tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 689 | 1 |
from __future__ import annotations
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int | float:
if len(a_ ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a_ )
or left < -len(a_ )
or right >= len(a_ )
or right < -len(a_ )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
A =(left + right) >> 1 # the middle
A =find_max(a_ , a_ , a_ ) # find max in range[left, mid]
A =find_max(a_ , mid + 1 , a_ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
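    # Usage sketch (illustrative addition, via the call-site name `find_max` used in
    # the recursion above): over the full index range the result matches built-in max:
    #   find_max([3, 1, 4, 1, 5, 9, 2, 6], 0, 7) == 9 == max([3, 1, 4, 1, 5, 9, 2, 6])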
| 689 |
from __future__ import annotations
def UpperCamelCase_ ( a_ ) ->None:
create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] )
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , ) ->None:
if index == len(a_ ):
print(a_ )
return
for i in range(len(a_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
A =True
create_state_space_tree(a_ , a_ , index + 1 , a_ )
current_sequence.pop()
A =False
__a = [3, 1, 2, 4]
generate_all_permutations(sequence)
__a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 689 | 1 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def UpperCamelCase_ ( ) ->Tuple:
A =9
A =[
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A =kruskal(a_ , a_ )
A =[
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(a_ ) == sorted(a_ )
| 689 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
A =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def _a ( self : int ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A =DDPMScheduler()
A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 )
A =output.audios[0]
A =output.images[0]
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
A =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A =DDIMScheduler()
A =self.dummy_vqvae_and_unet
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
A =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A =self.dummy_unet_condition
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =torch.rand((1, 1, 10) )
A =pipe(generator=snake_case__ , encoding=snake_case__ )
A =output.images[0]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =torch_device
A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ )
A =output.audios[0]
A =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 689 | 1 |
def UpperCamelCase_ ( a_ ) ->bool:
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def UpperCamelCase_ ( a_ ) ->bool:
A =credit_card_number
A =0
A =len(a_ ) - 2
for i in range(a_ , -1 , -2 ):
# double the value of every second digit
A =int(cc_number[i] )
digit *= 2
        # If doubling a digit yields a two-digit number, i.e. one greater
        # than 9 (e.g., 6 × 2 = 12), add the digits of the product
        # (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6) to get a single digit.
        # The branch below implements this as digit - 9 (via %= 10 then
        # += 1), which is equivalent for any doubled digit in 10..18.
if digit > 9:
digit %= 10
digit += 1
A =cc_number[:i] + str(a_ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(a_ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def UpperCamelCase_ ( a_ ) ->bool:
A =f'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(f'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(a_ ) <= 16:
print(f'''{error_message} of its length.''' )
return False
if not validate_initial_digits(a_ ):
print(f'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(a_ ):
print(f'''{error_message} it fails the Luhn check.''' )
return False
print(f'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 689 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A =find_backend(" if not is_torch_available():" )
self.assertEqual(snake_case__ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A =find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A =find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers_and_onnx" )
def _a ( self : List[Any] ):
"""simple docstring"""
A =read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("torch" , snake_case__ )
self.assertIn("torch_and_transformers" , snake_case__ )
self.assertIn("flax_and_transformers" , snake_case__ )
self.assertIn("torch_and_transformers_and_onnx" , snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def _a ( self : Dict ):
"""simple docstring"""
A =create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(snake_case__ , "\nCONSTANT = None\n" )
A =create_dummy_object("function" , "'torch'" )
self.assertEqual(
snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
A =create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(snake_case__ , snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , snake_case__ )
| 689 | 1 |
import re
import string
import numpy as np
import datasets
__a = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
__a = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: substrings matching these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
__a = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
def _a ( self : str , snake_case__ : Any , snake_case__ : Any , snake_case__ : Optional[Any]=None , snake_case__ : List[Any]=False , snake_case__ : Any=False , snake_case__ : Optional[int]=False , ):
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
A =np.array([re.sub(snake_case__ , "" , snake_case__ ) for x in predictions] )
A =np.array([re.sub(snake_case__ , "" , snake_case__ ) for x in references] )
else:
A =np.asarray(snake_case__ )
A =np.asarray(snake_case__ )
if ignore_case:
A =np.char.lower(snake_case__ )
A =np.char.lower(snake_case__ )
if ignore_punctuation:
A =string.punctuation.maketrans("" , "" , string.punctuation )
A =np.char.translate(snake_case__ , table=snake_case__ )
A =np.char.translate(snake_case__ , table=snake_case__ )
if ignore_numbers:
A =string.digits.maketrans("" , "" , string.digits )
A =np.char.translate(snake_case__ , table=snake_case__ )
A =np.char.translate(snake_case__ , table=snake_case__ )
A =predictions == references
return {"exact_match": np.mean(snake_case__ ) * 1_00}
| 689 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__:
"""simple docstring"""
_A = 42
_A = None
_A = None
__a = namedtuple("""CoinsDistribResult""", """moves excess""")
def UpperCamelCase_ ( a_ ) ->int:
if root is None:
return 0
# Validation
def count_nodes(a_ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(a_ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(a_ ) != count_coins(a_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(a_ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
A , A =get_distrib(node.left )
A , A =get_distrib(node.right )
A =1 - left_distrib_excess
A =1 - right_distrib_excess
A =(
left_distrib_moves
+ right_distrib_moves
+ abs(a_ )
+ abs(a_ )
)
A =node.data - coins_to_left - coins_to_right
return CoinsDistribResult(a_ , a_ )
return get_distrib(a_ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
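# Editorial worked example: for a root holding 3 coins with two empty-handed
# leaf children, each leaf's excess is 0, so the root must push one coin
# along each edge (coins_to_left = coins_to_right = 1), giving 2 moves; the
# root's remaining excess is 3 - 1 - 1 = 1, exactly the one coin it keeps
# for itself. Total coins equal total nodes (3), so validation passes.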
| 689 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = KandinskyInpaintPipeline
_A = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
_A = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
_A = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_A = False
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return 32
@property
def _a ( self : str ):
"""simple docstring"""
return 32
@property
def _a ( self : Optional[Any] ):
"""simple docstring"""
return self.time_input_dim
@property
def _a ( self : Tuple ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
"""simple docstring"""
return 1_00
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
A =XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def _a ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
A =MultilingualCLIP(snake_case__ )
A =text_encoder.eval()
return text_encoder
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
A ={
"in_channels": 9,
            # Out channels is double the movq latent channels (4 -> 8) because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
A =UNetaDConditionModel(**snake_case__ )
return model
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
A =VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =self.dummy_text_encoder
A =self.dummy_tokenizer
A =self.dummy_unet
A =self.dummy_movq
A =DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type="epsilon" , thresholding=snake_case__ , )
A ={
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _a ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : int=0 ):
"""simple docstring"""
A =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
A =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(snake_case__ )
# create init_image
A =floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
A =image.cpu().permute(0 , 2 , 3 , 1 )[0]
A =Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((2_56, 2_56) )
# create mask
A =np.ones((64, 64) , dtype=np.floataa )
A =0
if str(snake_case__ ).startswith("mps" ):
A =torch.manual_seed(snake_case__ )
else:
A =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
A ={
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _a ( self : Optional[Any] ):
"""simple docstring"""
A ="cpu"
A =self.get_dummy_components()
A =self.pipeline_class(**snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =pipe(**self.get_dummy_inputs(snake_case__ ) )
A =output.images
A =pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
A =image[0, -3:, -3:, -1]
A =image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
A =np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def _a ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Tuple ):
"""simple docstring"""
A =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
A =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
A =np.ones((7_68, 7_68) , dtype=np.floataa )
A =0
A ="a hat"
A =KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
A =KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
A =pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device="cpu" ).manual_seed(0 )
A , A =pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
A =pipeline(
snake_case__ , image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="np" , )
A =output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 689 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"""vocab_file""": """vocab.txt"""}
__a = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__a = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
def UpperCamelCase_ ( a_ ) ->List[Any]:
A =collections.OrderedDict()
with open(a_ , "r" , encoding="utf-8" ) as reader:
A =reader.readlines()
for index, token in enumerate(a_ ):
A =token.rstrip("\n" )
A =index
return vocab
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ):
"""simple docstring"""
A =vocab
A =unk_token
A =max_input_chars_per_word
def _a ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
A =list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
A =0
A =[]
while start < len(snake_case__ ):
A =len(snake_case__ )
A =None
while start < end:
A ="".join(chars[start:end] )
if substr in self.vocab:
A =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(snake_case__ )
A =end
return sub_tokens
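    # Editorial trace of the greedy longest-match-first loop above: with a
    # vocab containing {"ab", "a", "b", "c"}, tokenizing "abc" first tries
    # the full "abc" (absent), shrinks to "ab" (present), then restarts at
    # "c" (present), yielding ["ab", "c"]; a window with no vocab match
    # emits the unk token and advances one character.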
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ["input_ids", "attention_mask"]
_A = False
def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ):
"""simple docstring"""
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
A =bod_token
A =eod_token
A =load_vocab(snake_case__ )
A =self.encoder[space_token]
A =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
A ={v: k for k, v in self.encoder.items()}
A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _a ( self : Dict ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def _a ( self : Any ):
"""simple docstring"""
return self.encoder["\n"]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : Tuple , snake_case__ : int ):
"""simple docstring"""
A =[]
for x in jieba.cut(snake_case__ , cut_all=snake_case__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
return output_tokens
def _a ( self : List[Any] , snake_case__ : List[Any] , **snake_case__ : str ):
"""simple docstring"""
A =[i for i in token_ids if i >= 0]
A =[
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(snake_case__ , **snake_case__ )
def _a ( self : List[Any] , snake_case__ : int ):
"""simple docstring"""
return token in self.encoder
def _a ( self : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
return "".join(snake_case__ )
def _a ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Dict , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if os.path.isdir(snake_case__ ):
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
A =(filename_prefix + "-" if filename_prefix else "") + save_directory
A =0
if " " in self.encoder:
A =self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
A =self.encoder["\n"]
del self.encoder["\n"]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
A =token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ ))
| 689 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = ["""model.decoder.embed_positions.weights"""]
def UpperCamelCase_ ( a_ ) ->List[str]:
if "emb" in name:
A =name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
A =name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
A =name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
A =name.replace("linear1" , "fc1" )
if "linear2" in name:
A =name.replace("linear2" , "fc2" )
if "norm1" in name:
A =name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
A =name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
A =name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
A =name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
A =name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]:
A =list(state_dict.keys() )
A ={}
for key in keys:
A =state_dict.pop(a_ )
A =rename_keys(a_ )
if "in_proj_weight" in key:
# split fused qkv proj
A =val[:hidden_size, :]
A =val[hidden_size : 2 * hidden_size, :]
A =val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A =val
else:
A =val
return state_dict, enc_dec_proj_state_dict
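# Editorial note on the in_proj split above: fairseq fuses the q/k/v
# projections into one (3 * hidden_size, hidden_size) matrix stacked along
# dim 0, so rows [:h], [h:2h] and [-h:] recover q_proj, k_proj and v_proj.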
def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
A =1024
A =24
A =16
elif checkpoint == "medium":
A =1536
A =48
A =24
elif checkpoint == "large":
A =2048
A =48
A =32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
A =MusicgenDecoderConfig(
hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , )
return config
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]:
A =MusicGen.get_pretrained(a_ , device=a_ )
A =decoder_config_from_checkpoint(a_ )
A =fairseq_model.lm.state_dict()
A , A =rename_state_dict(
a_ , hidden_size=decoder_config.hidden_size )
A =TaEncoderModel.from_pretrained("t5-base" )
A =EncodecModel.from_pretrained("facebook/encodec_32khz" )
A =MusicgenForCausalLM(a_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A , A =decoder.load_state_dict(a_ , strict=a_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(a_ )
if len(a_ ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(a_ ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(a_ )
# check we can do a forward pass
A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A =input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A =model(input_ids=a_ , decoder_input_ids=a_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
A =AutoTokenizer.from_pretrained("t5-base" )
A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ )
# set the appropriate bos/pad token ids
A =2048
A =2048
# set other default generation config params
A =int(30 * audio_encoder.config.frame_rate )
A =True
A =3.0
if pytorch_dump_folder is not None:
Path(a_ ).mkdir(exist_ok=a_ )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(a_ )
processor.push_to_hub(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
__a = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 |
def UpperCamelCase_ ( a_ = 6008_5147_5143 ) ->int:
try:
A =int(a_ )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
A =2
A =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
A =i
while n % i == 0:
A =n // i
i += 1
return int(a_ )
if __name__ == "__main__":
print(F'''{solution() = }''')
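# Editorial reference values: solution(13195) returns 29 (prime factors
# 5, 7, 13 and 29), and the Project Euler default 600851475143 yields 6857.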
| 689 | 1 |
def UpperCamelCase_ ( a_ ) ->list[list[float]]:
A =[]
for data in source_data:
for i, el in enumerate(a_ ):
if len(a_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(a_ ) )
return data_lists
def UpperCamelCase_ ( a_ , a_ ) ->list[list[float]]:
A =[]
for dlist, weight in zip(a_ , a_ ):
A =min(a_ )
A =max(a_ )
A =[]
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
A =f'''Invalid weight of {weight:f} provided'''
raise ValueError(a_ )
score_lists.append(a_ )
return score_lists
def UpperCamelCase_ ( a_ ) ->list[float]:
A =[0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(a_ ):
A =final_scores[j] + ele
return final_scores
def UpperCamelCase_ ( a_ , a_ ) ->list[list[float]]:
A =get_data(a_ )
A =calculate_each_score(a_ , a_ )
A =generate_final_scores(a_ )
# append scores to source data
for i, ele in enumerate(a_ ):
source_data[i].append(a_ )
return source_data
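# Editorial worked example (column weights 0 = lower-is-better, 1 =
# higher-is-better): for source data [[20, 60, 2012], [23, 90, 2015],
# [22, 50, 2011]] with weights [0, 0, 1], min-max scoring gives row totals
# 1 + 0.75 + 0.25 = 2.0, 0 + 0 + 1 = 1.0 and 1/3 + 1 + 0 = 1.33..., which
# are appended as each row's final score.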
| 689 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "Wav2Vec2FeatureExtractor"
_A = "AutoTokenizer"
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
A =self.feature_extractor
A =False
@classmethod
def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ):
"""simple docstring"""
try:
return super().from_pretrained(snake_case__ , **snake_case__ )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , snake_case__ , )
A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ )
A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ )
return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ )
def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*snake_case__ , **snake_case__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
A =kwargs.pop("raw_speech" )
else:
A =kwargs.pop("audio" , snake_case__ )
A =kwargs.pop("sampling_rate" , snake_case__ )
A =kwargs.pop("text" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
if text is not None:
A =self.tokenizer(snake_case__ , **snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A =encodings["input_ids"]
return inputs
def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case__ , **snake_case__ )
A =kwargs.pop("input_features" , snake_case__ )
A =kwargs.pop("labels" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if input_features is not None:
A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
if labels is not None:
A =self.tokenizer.pad(snake_case__ , **snake_case__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A =labels["input_ids"]
return input_features
def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@contextmanager
def _a ( self : int ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
A =True
A =self.tokenizer
yield
A =self.feature_extractor
A =False
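# Editorial usage sketch for the deprecated context manager above:
#   with processor.as_target_processor():
#       labels = processor(transcripts).input_ids
# the recommended replacement encodes both modalities in one call:
#   batch = processor(audio=speech, sampling_rate=16_000, text=transcripts)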
| 689 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "xlm-prophetnet"
_A = ["past_key_values"]
_A = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self : Union[str, Any] , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[Union[str, Callable]] = "gelu" , snake_case__ : Optional[int] = 3_05_22 , snake_case__ : Optional[int] = 10_24 , snake_case__ : Optional[int] = 40_96 , snake_case__ : Optional[int] = 12 , snake_case__ : Optional[int] = 16 , snake_case__ : Optional[int] = 40_96 , snake_case__ : Optional[int] = 12 , snake_case__ : Optional[int] = 16 , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[int] = 5_12 , snake_case__ : Optional[float] = 0.02 , snake_case__ : Optional[bool] = True , snake_case__ : Optional[bool] = True , snake_case__ : Optional[int] = 0 , snake_case__ : Optional[int] = 2 , snake_case__ : Optional[int] = 32 , snake_case__ : Optional[int] = 1_28 , snake_case__ : Optional[bool] = False , snake_case__ : Optional[float] = 0.0 , snake_case__ : Optional[bool] = True , snake_case__ : Optional[int] = 0 , snake_case__ : Optional[int] = 1 , snake_case__ : Optional[int] = 2 , **snake_case__ : Optional[int] , ):
"""simple docstring"""
A =vocab_size
A =hidden_size
A =encoder_ffn_dim
A =num_encoder_layers
A =num_encoder_attention_heads
A =decoder_ffn_dim
A =num_decoder_layers
A =num_decoder_attention_heads
A =max_position_embeddings
A =init_std # Normal(0, this parameter)
A =activation_function
# parameters for xlmprophetnet
A =ngram
A =num_buckets
A =relative_max_distance
A =disable_ngram_loss
A =eps
# 3 Types of Dropout
A =attention_dropout
A =activation_dropout
A =dropout
A =use_cache
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , add_cross_attention=snake_case__ , decoder_start_token_id=snake_case__ , **snake_case__ , )
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _a ( self : Optional[int] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`." )
| 689 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 1 |
def UpperCamelCase_ ( a_ ) ->list:
if len(a_ ) < 2:
return collection
def circle_sort_util(a_ , a_ , a_ ) -> bool:
A =False
if low == high:
return swapped
A =low
A =high
while left < right:
if collection[left] > collection[right]:
A , A =(
collection[right],
collection[left],
)
A =True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
A , A =(
collection[right + 1],
collection[left],
)
A =True
A =low + int((high - low) / 2 )
A =circle_sort_util(a_ , a_ , a_ )
A =circle_sort_util(a_ , mid + 1 , a_ )
return swapped or left_swap or right_swap
A =True
while is_not_sorted is True:
A =circle_sort_util(a_ , 0 , len(a_ ) - 1 )
return collection
if __name__ == "__main__":
__a = input("""Enter numbers separated by a comma:\n""").strip()
__a = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
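# Editorial trace: on [4, 3, 2, 1] the first outer pass swaps the (4, 1) and
# (3, 2) pairs, producing the sorted [1, 2, 3, 4]; the half-recursions find
# nothing to swap, and the second swap-free pass terminates the while loop.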
| 689 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__a = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__a = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
__a = """|""".join(sys.argv[1:])
__a = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__a = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 689 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def UpperCamelCase_ ( a_ = 100_0000 , a_ = 10 ) ->int:
A =defaultdict(a_ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
A =max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
A =1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(a_ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F'''{solution() = }''')
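# Editorial note: the smallest lamina is outer_width 3 around a 1x1 hole,
# using 3 * 3 - 1 * 1 = 8 tiles; hole_width must share outer_width's parity
# (the `+= (outer_width - hole_width_lower_bound) % 2` line) so the border
# is a whole number of tiles thick on every side.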
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
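# Editorial note: with the _LazyModule pattern above, symbols resolve on
# first attribute access, so the heavy torch/TF imports behind each backend
# check are deferred until a backend-specific class is actually used.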
| 689 | 1 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__a = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def UpperCamelCase_ ( a_ ) ->Optional[Any]:
A =test_results.split(" " )
A =0
A =0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
A =expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(a_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
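# Editorial example: for a pytest summary line such as
# "2 failed, 98 passed in 31.23s ==", the trailing "==" makes the function
# take expressions[-2] as the duration, and it returns (2, 98, "31.23s").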
def UpperCamelCase_ ( a_ ) ->List[Any]:
A ={}
A =None
A =False
for line in failures_short_lines.split("\n" ):
if re.search(R"_ \[doctest\]" , a_ ):
A =True
A =line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
A =line
A =False
return failures
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : Any , snake_case__ : str , snake_case__ : Dict ):
"""simple docstring"""
A =title
A =doc_test_results["time_spent"].split("," )[0]
A =doc_test_results["success"]
A =doc_test_results["failures"]
A =self.n_success + self.n_failures
# Failures and success of the modeling tests
A =doc_test_results
@property
def _a ( self : List[str] ):
"""simple docstring"""
A =[self._time_spent]
A =0
for time in time_spent:
A =time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(snake_case__ ) == 1:
A =[0, 0, time_parts[0]]
A , A , A =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
A , A , A =total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'''{int(snake_case__ )}h{int(snake_case__ )}m{int(snake_case__ )}s'''
@property
def _a ( self : List[str] ):
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _a ( self : Dict ):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =40
A ={k: v["failed"] for k, v in doc_test_results.items() if isinstance(snake_case__ , snake_case__ )}
A =""
for category, failures in category_failures.items():
if len(snake_case__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(snake_case__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def _a ( self : List[str] ):
"""simple docstring"""
A =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(snake_case__ )
@staticmethod
def _a ( ):
"""simple docstring"""
A =[
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(snake_case__ )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=snake_case__ , )
def _a ( self : Dict ):
"""simple docstring"""
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
A =f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else "All tests passed."
A =client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=snake_case__ , )
def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
A =""
for key, value in failures.items():
A =value[:2_00] + " [Truncated]" if len(snake_case__ ) > 2_50 else value
failures_text += f'''*{key}*\n_{value}_\n\n'''
A =job_name
A ={"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
A ={
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _a ( self : Dict ):
"""simple docstring"""
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
A =self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
A =sorted(self.doc_test_results.items() , key=lambda snake_case__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
A =f'''*Num failures* :{len(job_result["failed"] )} \n'''
A =job_result["failures"]
A =self.get_reply_blocks(snake_case__ , snake_case__ , snake_case__ , text=snake_case__ )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'''Results for {job}''' , blocks=snake_case__ , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def UpperCamelCase_ ( ) ->Union[str, Any]:
A =os.environ["GITHUB_RUN_ID"]
A =f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
A =requests.get(a_ ).json()
A ={}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
A =math.ceil((result["total_count"] - 100) / 100 )
for i in range(a_ ):
A =requests.get(url + f'''&page={i + 2}''' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , a_ )
return {}
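# Editorial note on the pagination above: the first request already returns
# up to 100 jobs, so for e.g. total_count == 250 the loop fetches
# ceil((250 - 100) / 100) == 2 extra pages, requested as &page=2 and &page=3.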
def UpperCamelCase_ ( a_ ) ->List[str]:
A ={}
if os.path.exists(a_ ):
A =os.listdir(a_ )
for file in files:
try:
with open(os.path.join(a_ , a_ ) , encoding="utf-8" ) as f:
A =f.read()
except UnicodeDecodeError as e:
raise ValueError(f'''Could not open {os.path.join(a_ , a_ )}.''' ) from e
return _artifact
def UpperCamelCase_ ( ) ->str:
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : str , snake_case__ : str ):
"""simple docstring"""
A =name
A =[]
def __str__( self : Dict ):
"""simple docstring"""
return self.name
def _a ( self : int , snake_case__ : str ):
"""simple docstring"""
self.paths.append({"name": self.name, "path": path} )
A ={}
A =filter(os.path.isdir , os.listdir() )
for directory in directories:
A =directory
if artifact_name not in _available_artifacts:
A =Artifact(a_ )
_available_artifacts[artifact_name].add_path(a_ )
return _available_artifacts
if __name__ == "__main__":
__a = get_job_links()
__a = retrieve_available_artifacts()
__a = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__a = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__a = github_actions_job_links.get("""run_doctests""")
__a = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
__a = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
__a , __a , __a = handle_test_results(artifact["""stats"""])
__a = failed
__a = success
__a = time_spent[1:-1] + """, """
__a = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
__a = line.replace("""FAILED """, """""")
__a = line.split()[0].replace("""\n""", """""")
if "::" in line:
__a , __a = line.split("""::""")
else:
__a , __a = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__a = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__a = all_failures[test] if test in all_failures else """N/A"""
__a = failure
break
__a = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 689 |
def UpperCamelCase_ ( a_ , a_ ) ->int:
return int((input_a, input_a).count(0 ) != 0 )
def UpperCamelCase_ ( ) ->None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
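# Editorial note: NAND is functionally complete; for instance
# nand_gate(x, x) computes NOT x, and nand_gate(nand_gate(a, b),
# nand_gate(a, b)) computes AND.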
| 689 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "xlm-roberta-xl"
def __init__( self : int , snake_case__ : Any=25_08_80 , snake_case__ : Dict=25_60 , snake_case__ : str=36 , snake_case__ : int=32 , snake_case__ : List[str]=1_02_40 , snake_case__ : Dict="gelu" , snake_case__ : Any=0.1 , snake_case__ : Any=0.1 , snake_case__ : List[Any]=5_14 , snake_case__ : Any=1 , snake_case__ : List[Any]=0.02 , snake_case__ : Dict=1E-05 , snake_case__ : Union[str, Any]=1 , snake_case__ : Dict=0 , snake_case__ : Optional[Any]=2 , snake_case__ : List[Any]="absolute" , snake_case__ : List[str]=True , snake_case__ : Dict=None , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
A =vocab_size
A =hidden_size
A =num_hidden_layers
A =num_attention_heads
A =hidden_act
A =intermediate_size
A =hidden_dropout_prob
A =attention_probs_dropout_prob
A =max_position_embeddings
A =type_vocab_size
A =initializer_range
A =layer_norm_eps
A =position_embedding_type
A =use_cache
A =classifier_dropout
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
@property
def _a ( self : List[Any] ):
"""simple docstring"""
if self.task == "multiple-choice":
A ={0: "batch", 1: "choice", 2: "sequence"}
else:
A ={0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 689 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
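    # Worked check (added): with array = [1, 2, 5] and target = 5 the ordered
    # compositions are 1+1+1+1+1, the four orderings of 1+1+1+2, the three
    # orderings of 1+2+2, and 5 itself -- nine in total.
    assert combination_sum_iv_bottom_up(n, array, target) == 9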
| 689 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def UpperCamelCase_ ( a_ , a_=False , a_=False ) ->List[str]:
A ="backbone." if is_semantic else ""
A =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', "beit.embeddings.cls_token"),
(f'''{prefix}patch_embed.proj.weight''', "beit.embeddings.patch_embeddings.projection.weight"),
(f'''{prefix}patch_embed.proj.bias''', "beit.embeddings.patch_embeddings.projection.bias"),
(f'''{prefix}pos_embed''', "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def UpperCamelCase_ ( a_ , a_ , a_=False , a_=False ) ->str:
for i in range(config.num_hidden_layers ):
A ="backbone." if is_semantic else ""
# queries, keys and values
A =state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
A =state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
A =state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
A =in_proj_weight[
: config.hidden_size, :
]
A =q_bias
A =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A =in_proj_weight[
-config.hidden_size :, :
]
A =v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A =state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
A =state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
A =gamma_a
A =gamma_a
def UpperCamelCase_ ( a_ , a_ , a_ ) ->str:
A =dct.pop(a_ )
A =val
def UpperCamelCase_ ( ) ->List[Any]:
A ="http://images.cocodataset.org/val2017/000000039769.jpg"
A =Image.open(requests.get(a_ , stream=a_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_ , a_=False ) ->List[Any]:
A =False if "rvlcdip" in checkpoint_url else True
A =BeitConfig(use_absolute_position_embeddings=a_ , use_mask_token=a_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A =1024
A =4096
A =24
A =16
# labels
if "rvlcdip" in checkpoint_url:
A =16
A ="huggingface/label-files"
A ="rvlcdip-id2label.json"
A =json.load(open(hf_hub_download(a_ , a_ , repo_type="dataset" ) , "r" ) )
A ={int(a_ ): v for k, v in idalabel.items()}
A =idalabel
A ={v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A =torch.hub.load_state_dict_from_url(a_ , map_location="cpu" )["model"]
A =create_rename_keys(a_ , has_lm_head=a_ )
for src, dest in rename_keys:
rename_key(a_ , a_ , a_ )
read_in_q_k_v(a_ , a_ , has_lm_head=a_ )
# load HuggingFace model
A =BeitForMaskedImageModeling(a_ ) if has_lm_head else BeitForImageClassification(a_ )
model.eval()
model.load_state_dict(a_ )
# Check outputs on an image
A =BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=a_ )
A =prepare_img()
A =image_processor(images=a_ , return_tensors="pt" )
A =encoding["pixel_values"]
A =model(a_ )
A =outputs.logits
# verify logits
A =[1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(a_ ), "Shape of logits not as expected"
Path(a_ ).mkdir(exist_ok=a_ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a_ )
if push_to_hub:
if has_lm_head:
A ="dit-base" if "base" in checkpoint_url else "dit-large"
else:
A ="dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(a_ , a_ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=a_ , )
model.push_to_hub(
repo_path_or_name=Path(a_ , a_ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=a_ , )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
__a = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
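# Hedged usage note (the script filename is assumed; the checkpoint URL is
# this script's own default):
#
#   python convert_dit_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base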
| 689 |
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Compute the falling product u * (u - 1) * ... * (u - p + 1)."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
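# Note (added): the difference-table loop evaluates Newton's forward formula
#     f(x) ~= y0 + u*dy0 + u(u-1)/2! * d2y0 + ...,   u = (x - x0) / h,
# where ucal(u, p) supplies the falling product u(u-1)...(u-p+1); for example
# ucal(2.5, 3) == 2.5 * 1.5 * 0.5 == 1.875.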
| 689 | 1 |
import torch
from transformers import AutoModel
class UpperCamelCase__( torch.nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , snake_case__ : Dict="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(snake_case__ , self ).__init__()
A =AutoModel.from_pretrained(snake_case__ , return_dict=snake_case__ )
A =torch.nn.CosineSimilarity(3 , 1E-08 )
A =torch.nn.Softmax(dim=1 )
def _a ( self : Optional[int] , **snake_case__ : List[Any] ):
"""simple docstring"""
return self.bert(**snake_case__ ).last_hidden_state
def _a ( self : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=snake_case__ )
def _a ( self : int , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : str=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(snake_case__ , snake_case__ ) )
def _a ( self : Optional[int] , snake_case__ : Dict , snake_case__ : Optional[Any] ):
"""simple docstring"""
A =W_supports["sizes"].tolist()
A =W_supports["start_token_id"].item()
A =W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
A =self.BERT(**snake_case__ )
A =self.BERT(**snake_case__ )
A =None
A =None
A =W_supports["input_ids"] == start_token_id
A =W_supports["input_ids"] == end_token_id
for i, size in enumerate(snake_case__ ):
if i == 0:
A =0
else:
A =support_sizes[i - 1]
A =S[s : s + size][start_token_masks[s : s + size]]
A =S[s : s + size][end_token_masks[s : s + size]]
A =torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
A =torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
A =torch.vstack((p_starts, p_start) )
A =torch.vstack((p_ends, p_end) )
else:
A =p_start
A =p_end
return p_starts, p_ends
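# Hedged usage sketch (this wrapper ships upstream as `FSNERModel`; the
# W_query / W_supports batches come from the matching FSNER tokenizer
# utilities and are only shown schematically here):
#
#   model = FSNERModel()
#   p_starts, p_ends = model(W_query, W_supports)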
| 689 |
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
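# Note (added): the per-pixel loop relies on NumPy broadcasting the plain
# list [255, 255, 255] against each pixel. For a uint8 image the vectorized
# equivalent is simply:
#
#   negative = 255 - img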
| 689 | 1 |
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 689 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
__a = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
__a = {
"""ctrl""": 2_5_6,
}
__a = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def UpperCamelCase_ ( a_ ) ->List[str]:
A =set()
A =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A =char
A =set(a_ )
return pairs
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = CONTROL_CODES
def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ):
"""simple docstring"""
super().__init__(unk_token=snake_case__ , **snake_case__ )
with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
A =json.load(snake_case__ )
A ={v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
A =merges_handle.read().split("\n" )[1:-1]
A =[tuple(merge.split() ) for merge in merges]
A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
A ={}
@property
def _a ( self : str ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : List[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : int , snake_case__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
A =tuple(snake_case__ )
A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
A =get_pairs(snake_case__ )
if not pairs:
return token
while True:
A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
A , A =bigram
A =[]
A =0
while i < len(snake_case__ ):
try:
A =word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A =j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A =tuple(snake_case__ )
A =new_word
if len(snake_case__ ) == 1:
break
else:
A =get_pairs(snake_case__ )
A ="@@ ".join(snake_case__ )
A =word[:-4]
A =word
return word
def _a ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
A =[]
A =re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def _a ( self : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
A =" ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
A =0
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
A =token_index
writer.write(" ".join(snake_case__ ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
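# Hedged usage sketch (upstream this class is `transformers.CTRLTokenizer`;
# CTRL prompts conventionally begin with one of the control codes above):
#
#   tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#   ids = tokenizer("Links Hello world")["input_ids"]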
| 689 | 1 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__a = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class UpperCamelCase__( datasets.BuilderConfig ):
"""simple docstring"""
_A = None
def UpperCamelCase_ ( a_ , a_ , ) ->Optional[Any]:
import pyspark
def generate_fn():
A =df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
A =df_with_partition_id.select("*" ).where(f'''part_id = {partition_id}''' ).drop("part_id" )
A =partition_df.collect()
A =0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class UpperCamelCase__( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self : List[str] , snake_case__ : "pyspark.sql.DataFrame" , snake_case__ : List[Any]=None , ):
"""simple docstring"""
A =df
A =partition_order or range(self.df.rdd.getNumPartitions() )
A =_generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Dict ):
"""simple docstring"""
yield from self.generate_examples_fn()
def _a ( self : Union[str, Any] , snake_case__ : np.random.Generator ):
"""simple docstring"""
A =list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(snake_case__ )
return SparkExamplesIterable(self.df , partition_order=snake_case__ )
def _a ( self : str , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
A =self.split_shard_indices_by_worker(snake_case__ , snake_case__ )
return SparkExamplesIterable(self.df , partition_order=snake_case__ )
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return len(self.partition_order )
class UpperCamelCase__( datasets.DatasetBuilder ):
"""simple docstring"""
_A = SparkConfig
def __init__( self : List[str] , snake_case__ : "pyspark.sql.DataFrame" , snake_case__ : str = None , snake_case__ : str = None , **snake_case__ : List[str] , ):
"""simple docstring"""
import pyspark
A =pyspark.sql.SparkSession.builder.getOrCreate()
A =df
A =working_dir
super().__init__(
cache_dir=snake_case__ , config_name=str(self.df.semanticHash() ) , **snake_case__ , )
def _a ( self : Dict ):
"""simple docstring"""
def create_cache_and_write_probe(snake_case__ : Tuple ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=snake_case__ )
            probe_file = os.path.join(self._cache_dir , "fs_test" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(snake_case__ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
A =(
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(snake_case__ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def _a ( self : Dict ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def _a ( self : List[str] , snake_case__ : datasets.download.download_manager.DownloadManager ):
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _a ( self : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
import pyspark
def get_arrow_batch_size(snake_case__ : Optional[Any] ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
A =self.df.count()
A =df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
A =(
self.df.limit(snake_case__ )
.repartition(1 )
.mapInArrow(snake_case__ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
A =approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
A =min(snake_case__ , int(approx_total_size / max_shard_size ) )
A =self.df.repartition(snake_case__ )
def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : int , ):
"""simple docstring"""
import pyspark
A =ParquetWriter if file_format == "parquet" else ArrowWriter
A =os.path.join(self._working_dir , os.path.basename(snake_case__ ) ) if self._working_dir else fpath
A =file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
A =self.config.features
A =self._writer_batch_size
A =self._fs.storage_options
def write_arrow(snake_case__ : Union[str, Any] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
A =pyspark.TaskContext().taskAttemptId()
A =next(snake_case__ , snake_case__ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
A =0
A =writer_class(
features=snake_case__ , path=working_fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , writer_batch_size=snake_case__ , storage_options=snake_case__ , embed_local_files=snake_case__ , )
A =pa.Table.from_batches([first_batch] )
writer.write_table(snake_case__ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
A , A =writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
A =writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , writer_batch_size=snake_case__ , storage_options=snake_case__ , embed_local_files=snake_case__ , )
A =pa.Table.from_batches([batch] )
writer.write_table(snake_case__ )
if writer._num_bytes > 0:
A , A =writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(snake_case__ ) ):
A =os.path.join(os.path.dirname(snake_case__ ) , os.path.basename(snake_case__ ) )
shutil.move(snake_case__ , snake_case__ )
A =(
self.df.mapInArrow(snake_case__ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _a ( self : List[str] , snake_case__ : "datasets.SplitGenerator" , snake_case__ : str = "arrow" , snake_case__ : Optional[Union[str, int]] = None , snake_case__ : Optional[int] = None , **snake_case__ : Optional[Any] , ):
"""simple docstring"""
self._validate_cache_dir()
A =convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(snake_case__ )
A =not is_remote_filesystem(self._fs )
A =os.path.join if is_local else posixpath.join
A ="-TTTTT-SSSSS-of-NNNNN"
A =f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
A =path_join(self._output_dir , snake_case__ )
A =0
A =0
A =0
A =[]
A =[]
for task_id, content in self._prepare_split_single(snake_case__ , snake_case__ , snake_case__ ):
(
(
A
) , (
A
) , (
A
) , (
A
) ,
) =content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(snake_case__ )
A =total_num_examples
A =total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
A =all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
A =self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
snake_case__ : int , snake_case__ : int , snake_case__ : int , ):
rename(
snake_case__ , fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , fpath.replace("TTTTT-SSSSS" , f'''{global_shard_id:05d}''' ).replace("NNNNN" , f'''{total_shards:05d}''' ) , )
A =[]
A =0
for i in range(len(snake_case__ ) ):
A , A =task_id_and_num_shards[i]
for shard_id in range(snake_case__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(snake_case__ , len(snake_case__ ) ).map(lambda snake_case__ : _rename_shard(*snake_case__ ) ).collect()
else:
# don't use any pattern
A =0
A =task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , f'''{shard_id:05d}''' ).replace("TTTTT" , f'''{task_id:05d}''' ) , fpath.replace(snake_case__ , "" ) , )
def _a ( self : Union[str, Any] , snake_case__ : "datasets.SplitGenerator" , ):
"""simple docstring"""
return SparkExamplesIterable(self.df )
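# Hedged usage sketch (this builder backs the public `Dataset.from_spark`
# entry point; the DataFrame below is illustrative):
#
#   df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
#   ds = datasets.Dataset.from_spark(df)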
| 689 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer
# Driver Code
if __name__ == "__main__":
__a = []
__a = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
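        # Caveat (added): the greedy strategy is optimal for canonical coin
        # systems such as the Indian denominations above, but not in general:
        # find_minimum_change([1, 3, 4], "6") returns [4, 1, 1] even though
        # [3, 3] uses fewer coins.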
| 689 | 1 |
import argparse
from collections import defaultdict
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , a_ ) ->Optional[Any]:
A =f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(a_ , "r" ) as f:
A =f.readlines()
A =f'''class {class_name}('''
A =f'''{4 * " "}def {test_name}('''
A =f'''{8 * " "}{correct_line.split()[0]}'''
A =f'''{16 * " "}{correct_line.split()[0]}'''
A =False
A =False
A =False
A =False
A =0
A =0
A =[]
for line in lines:
if line.startswith(a_ ):
A =True
elif in_class and line.startswith(a_ ):
A =True
elif in_class and in_func and (line.startswith(a_ ) or line.startswith(a_ )):
A =len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
A =True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
A =True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * " "}{correct_line}''' )
A =A =A =A =False
else:
new_lines.append(a_ )
with open(a_ , "w" ) as f:
for line in new_lines:
f.write(a_ )
def UpperCamelCase_ ( a_ , a_=None ) ->Dict:
if fail is not None:
with open(a_ , "r" ) as f:
A ={l.strip() for l in f.readlines()}
else:
A =None
with open(a_ , "r" ) as f:
A =f.readlines()
A =defaultdict(a_ )
for line in correct_lines:
A , A , A , A =line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(a_ , a_ , a_ , a_ , a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
__a = parser.parse_args()
main(args.correct_filename, args.fail_filename)
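# Input format note (added): each line of --correct_filename must contain
# four ";"-separated fields, matching the unpacking in main():
#
#   <file>;<class_name>;<test_name>;<correct_line>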
| 689 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = ["""model.decoder.embed_positions.weights"""]
def UpperCamelCase_ ( a_ ) ->List[str]:
if "emb" in name:
A =name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
A =name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
A =name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
A =name.replace("linear1" , "fc1" )
if "linear2" in name:
A =name.replace("linear2" , "fc2" )
if "norm1" in name:
A =name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
A =name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
A =name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
A =name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
A =name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]:
A =list(state_dict.keys() )
A ={}
for key in keys:
A =state_dict.pop(a_ )
A =rename_keys(a_ )
if "in_proj_weight" in key:
# split fused qkv proj
A =val[:hidden_size, :]
A =val[hidden_size : 2 * hidden_size, :]
A =val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A =val
else:
A =val
return state_dict, enc_dec_proj_state_dict
def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
A =1024
A =24
A =16
elif checkpoint == "medium":
A =1536
A =48
A =24
elif checkpoint == "large":
A =2048
A =48
A =32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
A =MusicgenDecoderConfig(
hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , )
return config
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]:
A =MusicGen.get_pretrained(a_ , device=a_ )
A =decoder_config_from_checkpoint(a_ )
A =fairseq_model.lm.state_dict()
A , A =rename_state_dict(
a_ , hidden_size=decoder_config.hidden_size )
    A =T5EncoderModel.from_pretrained("t5-base" )
A =EncodecModel.from_pretrained("facebook/encodec_32khz" )
A =MusicgenForCausalLM(a_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A , A =decoder.load_state_dict(a_ , strict=a_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(a_ )
if len(a_ ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(a_ ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(a_ )
# check we can do a forward pass
A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A =input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A =model(input_ids=a_ , decoder_input_ids=a_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
A =AutoTokenizer.from_pretrained("t5-base" )
A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ )
# set the appropriate bos/pad token ids
A =2048
A =2048
# set other default generation config params
A =int(30 * audio_encoder.config.frame_rate )
A =True
A =3.0
if pytorch_dump_folder is not None:
Path(a_ ).mkdir(exist_ok=a_ )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(a_ )
processor.push_to_hub(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
__a = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
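# Hedged usage note (the script filename is assumed; the flags are the ones
# defined above):
#
#   python convert_musicgen_transformers.py \
#       --checkpoint small --pytorch_dump_folder ./musicgen-small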
| 689 | 1 |
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(F'''{solution() = }''')
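# Background (added): Project Euler 87 counts numbers below fifty million of
# the form p1**2 + p2**3 + p3**4 with p1, p2, p3 prime, which is why the
# sieve only needs primes up to sqrt(limit - 2**3 - 2**4) = sqrt(limit - 24).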
| 689 |
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 689 | 1 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A =find_backend(" if not is_torch_available():" )
self.assertEqual(snake_case__ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A =find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A =find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers_and_onnx" )
def _a ( self : List[Any] ):
"""simple docstring"""
A =read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , snake_case__ )
self.assertIn("torch_and_transformers" , snake_case__ )
self.assertIn("flax_and_transformers" , snake_case__ )
self.assertIn("torch_and_transformers_and_onnx" , snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def _a ( self : Dict ):
"""simple docstring"""
A =create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(snake_case__ , "\nCONSTANT = None\n" )
A =create_dummy_object("function" , "'torch'" )
self.assertEqual(
snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
A =create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(snake_case__ , snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , snake_case__ )
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
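# Note (added): the _LazyModule indirection defers the heavy torch/tf/flax
# imports until an attribute is first accessed, so for example
#
#   from transformers import RoFormerModel
#
# only pays the PyTorch import cost at that point, and environments without
# torch can still import the config and tokenizer.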
| 689 | 1 |
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(F'''{solution() = }''')
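# Counting note (added): a square lamina with outer side n and hole side m
# uses n**2 - m**2 tiles and needs n - m even with m >= 1, so for each
# outer_width the loop counts all admissible hole widths in one step.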
| 689 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__a = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__a = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__a = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
"""simple docstring"""
def _a ( self : Any ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ):
"""simple docstring"""
A =len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
A =[[refs[i] for refs in references] for i in range(snake_case__ )]
A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
A =sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 689 | 1 |
from ...processing_utils import ProcessorMixin
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "SpeechT5FeatureExtractor"
_A = "SpeechT5Tokenizer"
def __init__( self : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
def __call__( self : Optional[int] , *snake_case__ : Optional[int] , **snake_case__ : Tuple ):
"""simple docstring"""
A =kwargs.pop("audio" , snake_case__ )
A =kwargs.pop("text" , snake_case__ )
A =kwargs.pop("text_target" , snake_case__ )
A =kwargs.pop("audio_target" , snake_case__ )
A =kwargs.pop("sampling_rate" , snake_case__ )
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
if audio is not None:
A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
elif text is not None:
A =self.tokenizer(snake_case__ , **snake_case__ )
else:
A =None
if audio_target is not None:
A =self.feature_extractor(audio_target=snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
A =targets["input_values"]
elif text_target is not None:
A =self.tokenizer(snake_case__ , **snake_case__ )
A =targets["input_ids"]
else:
A =None
if inputs is None:
return targets
if targets is not None:
A =labels
A =targets.get("attention_mask" )
if decoder_attention_mask is not None:
A =decoder_attention_mask
return inputs
def _a ( self : Optional[Any] , *snake_case__ : List[str] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
A =kwargs.pop("input_values" , snake_case__ )
A =kwargs.pop("input_ids" , snake_case__ )
A =kwargs.pop("labels" , snake_case__ )
if input_values is not None and input_ids is not None:
raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
if input_values is not None:
A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
elif input_ids is not None:
A =self.tokenizer.pad(snake_case__ , **snake_case__ )
else:
A =None
if labels is not None:
if "input_ids" in labels or (isinstance(snake_case__ , snake_case__ ) and "input_ids" in labels[0]):
A =self.tokenizer.pad(snake_case__ , **snake_case__ )
A =targets["input_ids"]
else:
A =self.feature_extractor.feature_size
A =self.feature_extractor.num_mel_bins
A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
A =feature_size_hack
A =targets["input_values"]
else:
A =None
if inputs is None:
return targets
if targets is not None:
A =labels
A =targets.get("attention_mask" )
if decoder_attention_mask is not None:
A =decoder_attention_mask
return inputs
def _a ( self : str , *snake_case__ : List[Any] , **snake_case__ : Dict ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _a ( self : Optional[Any] , *snake_case__ : Optional[int] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
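# Hedged usage sketch (upstream name: `transformers.SpeechT5Processor`; the
# checkpoint and text are illustrative):
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello world", return_tensors="pt")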
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
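# Note on the pattern above (an illustrative sketch, not part of the module):
# because `_LazyModule` replaces this module in `sys.modules`, importing the
# package stays cheap and the torch-backed classes are only materialized on
# first attribute access, e.g. (assuming the usual transformers layout):
#     from transformers.models.gpt_bigcode import GPTBigCodeConfig  # no torch import yet
#     from transformers.models.gpt_bigcode import GPTBigCodeModel   # triggers the torch branch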
| 689 | 1 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def UpperCamelCase_ ( ) ->int:
A =ArgumentParser("Diffusers CLI tool" , usage="diffusers-cli <command> [<args>]" )
A =parser.add_subparsers(help="diffusers-cli command helpers" )
# Register commands
EnvironmentCommand.register_subcommand(a_ )
# Let's go
A =parser.parse_args()
if not hasattr(a_ , "func" ):
parser.print_help()
exit(1 )
# Run
A =args.func(a_ )
service.run()
if __name__ == "__main__":
main()
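# Typical invocation once installed as a console script (illustrative):
#     diffusers-cli env
# which runs the environment report registered by `EnvironmentCommand`.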
| 689 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : List[str] , snake_case__ : Optional[int] , ):
"""simple docstring"""
A =parent
A =13
A =7
A =True
A =True
A =True
A =True
A =True
A =False
A =False
A =False
A =2
A =99
A =0
A =32
A =2
A =4
A =0.1
A =0.1
A =5_12
A =16
A =2
A =0.02
A =3
A =4
A ="last"
A =True
A =None
A =0
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
A =None
if self.use_input_lengths:
A =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A =None
if self.use_token_type_ids:
A =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A =None
A =None
A =None
if self.use_labels:
A =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
A =ids_tensor([self.batch_size] , self.num_choices )
A =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
A =TFFlaubertModel(config=snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
A =[input_ids, input_mask]
A =model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , ):
"""simple docstring"""
A =TFFlaubertWithLMHeadModel(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , ):
"""simple docstring"""
A =TFFlaubertForQuestionAnsweringSimple(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths}
A =model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , ):
"""simple docstring"""
A =TFFlaubertForSequenceClassification(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , ):
"""simple docstring"""
A =self.num_labels
A =TFFlaubertForTokenClassification(config=snake_case__ )
A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , ):
"""simple docstring"""
A =self.num_choices
A =TFFlaubertForMultipleChoice(config=snake_case__ )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A ={
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : Any ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) =config_and_inputs
A ={
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_A = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
    def _a ( self : Union[str, Any] , pipeline_test_casse_name : Tuple , config_class : List[str] , model_architecture : Dict , tokenizer_name : Any , processor_name : str ):
        """simple docstring"""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
            return True
        return False
def _a ( self : Optional[int] ):
"""simple docstring"""
A =TFFlaubertModelTester(self )
A =ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def _a ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : str ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A =TFFlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A =TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
A =tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
A =model(snake_case__ )[0]
A =tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice.
A =tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 689 | 1 |
from typing import List
import numpy as np
def UpperCamelCase_ ( a_ ) ->int:
A ={key: len(a_ ) for key, value in gen_kwargs.items() if isinstance(a_ , a_ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
A =max(lists_lengths.values() , default=0 )
return max(1 , a_ )
def UpperCamelCase_ ( a_ , a_ ) ->List[range]:
A =[]
for group_idx in range(a_ ):
A =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
A =shards_indices_per_group[-1].stop if shards_indices_per_group else 0
A =range(a_ , start + num_shards_to_add )
shards_indices_per_group.append(a_ )
return shards_indices_per_group
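# Worked example for the distribution helper above (upstream it is named
# `_distribute_shards`, as the call site below assumes): spreading 5 shards
# over 2 jobs puts the remainder in the earliest groups, returning
# [range(0, 3), range(3, 5)].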
def UpperCamelCase_ ( a_ , a_ ) ->List[dict]:
A =_number_of_shards_in_gen_kwargs(a_ )
if num_shards == 1:
return [dict(a_ )]
else:
A =_distribute_shards(num_shards=a_ , max_num_jobs=a_ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(a_ , a_ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(a_ ) )
]
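# Worked example for the splitting helper above (upstream name
# `_split_gen_kwargs`): {"files": ["a", "b", "c", "d"]} with max_num_jobs=2
# becomes [{"files": ["a", "b"]}, {"files": ["c", "d"]}]; non-list values are
# copied into every group unchanged.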
def UpperCamelCase_ ( a_ ) ->dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , a_ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def UpperCamelCase_ ( a_ , a_ ) ->dict:
A ={len(a_ ) for value in gen_kwargs.values() if isinstance(a_ , a_ )}
A ={}
for size in list_sizes:
A =list(range(a_ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
A =dict(a_ )
for key, value in shuffled_kwargs.items():
if isinstance(a_ , a_ ):
A =[value[i] for i in indices_per_size[len(a_ )]]
return shuffled_kwargs
| 689 |
from __future__ import annotations
def UpperCamelCase_ ( a_ ) ->None:
create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] )
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , ) ->None:
if index == len(a_ ):
print(a_ )
return
for i in range(len(a_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
A =True
create_state_space_tree(a_ , a_ , index + 1 , a_ )
current_sequence.pop()
A =False
__a = [3, 1, 2, 4]
generate_all_permutations(sequence)
__a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
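# The second call prints the 3! = 6 permutations in depth-first order:
# ['A', 'B', 'C'], ['A', 'C', 'B'], ['B', 'A', 'C'],
# ['B', 'C', 'A'], ['C', 'A', 'B'], ['C', 'B', 'A']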
| 689 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Tuple ):
"""simple docstring"""
A =tempfile.mkdtemp()
# fmt: off
A =["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
A ={
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
A =os.path.join(self.tmpdirname , snake_case__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(snake_case__ , snake_case__ )
def _a ( self : Tuple , **snake_case__ : Any ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _a ( self : Optional[int] , **snake_case__ : Tuple ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ )
def _a ( self : Any ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self : Tuple ):
"""simple docstring"""
A =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A =[Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =self.get_tokenizer()
A =self.get_image_processor()
A =VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor.save_pretrained(self.tmpdirname )
A =VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _a ( self : List[str] ):
"""simple docstring"""
A =VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A =self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
A =VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _a ( self : int ):
"""simple docstring"""
A =self.get_image_processor()
A =self.get_tokenizer()
A =VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
A =self.prepare_image_inputs()
A =image_processor(snake_case__ , return_tensors="np" )
A =processor(images=snake_case__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.get_image_processor()
A =self.get_tokenizer()
A =VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
A ="lower newer"
A =processor(text=snake_case__ )
A =tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.get_image_processor()
A =self.get_tokenizer()
A =VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
A ="lower newer"
A =self.prepare_image_inputs()
A =processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(snake_case__ ):
processor()
def _a ( self : Optional[int] ):
"""simple docstring"""
A =self.get_image_processor()
A =self.get_tokenizer()
A =VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
A =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A =processor.batch_decode(snake_case__ )
A =tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.get_image_processor()
A =self.get_tokenizer()
A =VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
A ="lower newer"
A =self.prepare_image_inputs()
A =processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 689 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
A =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def _a ( self : int ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A =DDPMScheduler()
A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 )
A =output.audios[0]
A =output.images[0]
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
A =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A =DDIMScheduler()
A =self.dummy_vqvae_and_unet
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
A =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A =self.dummy_unet_condition
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =torch.rand((1, 1, 10) )
A =pipe(generator=snake_case__ , encoding=snake_case__ )
A =output.images[0]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =torch_device
A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ )
A =output.audios[0]
A =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 689 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""ConvNextFeatureExtractor"""]
__a = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 689 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A =find_backend(" if not is_torch_available():" )
self.assertEqual(snake_case__ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A =find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A =find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers_and_onnx" )
def _a ( self : List[Any] ):
"""simple docstring"""
A =read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , snake_case__ )
self.assertIn("torch_and_transformers" , snake_case__ )
self.assertIn("flax_and_transformers" , snake_case__ )
self.assertIn("torch_and_transformers_and_onnx" , snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def _a ( self : Dict ):
"""simple docstring"""
A =create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(snake_case__ , "\nCONSTANT = None\n" )
A =create_dummy_object("function" , "'torch'" )
self.assertEqual(
snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
A =create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(snake_case__ , snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , snake_case__ )
| 689 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__a = None
__a = logging.get_logger(__name__)
__a = """▁"""
__a = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__a = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""},
"""tokenizer_file""": {
"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"""
},
}
__a = {
"""google/pegasus-xsum""": 5_1_2,
}
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = PegasusTokenizer
_A = ["input_ids", "attention_mask"]
def __init__( self : List[str] , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Any="<pad>" , snake_case__ : str="</s>" , snake_case__ : Optional[Any]="<unk>" , snake_case__ : Union[str, Any]="<mask_2>" , snake_case__ : List[Any]="<mask_1>" , snake_case__ : Tuple=None , snake_case__ : List[str]=1_03 , **snake_case__ : str , ):
"""simple docstring"""
A =offset
if additional_special_tokens is not None:
if not isinstance(snake_case__ , snake_case__ ):
raise TypeError(
f'''additional_special_tokens should be of type {type(snake_case__ )}, but is'''
f''' {type(snake_case__ )}''' )
A =(
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(snake_case__ ) , self.offset - 1 )
]
if len(set(snake_case__ ) ) != len(snake_case__ ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
A =additional_special_tokens_extended
else:
A =[mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , pad_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , mask_token=snake_case__ , mask_token_sent=snake_case__ , offset=snake_case__ , additional_special_tokens=snake_case__ , **snake_case__ , )
A =vocab_file
A =False if not self.vocab_file else True
def _a ( self : Optional[int] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
A =set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def _a ( self : Tuple , snake_case__ : List , snake_case__ : Optional[List] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(snake_case__ )
elif token_ids_a is None:
return self._special_token_mask(snake_case__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _a ( self : int , snake_case__ : Optional[int] , snake_case__ : Dict=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
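    # Sequence layout example (hedged: assumes the standard Pegasus special-token
    # ids, pad=0 / eos=1, and the upstream parameter names the body refers to):
    # for a single sequence [5, 6] the method above returns [5, 6, 1], i.e. one
    # EOS appended and no BOS.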
def _a ( self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 689 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__:
"""simple docstring"""
    data: int
    left: UpperCamelCase__ | None = None
    right: UpperCamelCase__ | None = None
__a = namedtuple("""CoinsDistribResult""", """moves excess""")
def UpperCamelCase_ ( a_ ) ->int:
if root is None:
return 0
# Validation
def count_nodes(a_ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(a_ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(a_ ) != count_coins(a_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(a_ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
A , A =get_distrib(node.left )
A , A =get_distrib(node.right )
A =1 - left_distrib_excess
A =1 - right_distrib_excess
A =(
left_distrib_moves
+ right_distrib_moves
+ abs(a_ )
+ abs(a_ )
)
A =node.data - coins_to_left - coins_to_right
return CoinsDistribResult(a_ , a_ )
return get_distrib(a_ )[0]
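# Worked example (a sketch; assumes the field order data/left/right declared
# above and the upstream local names inside get_distrib): a root holding 3
# coins with two empty leaves needs 2 moves, one coin pushed down to each child:
#     UpperCamelCase_(UpperCamelCase__(3, UpperCamelCase__(0), UpperCamelCase__(0)))  # -> 2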
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 | 1 |
import string
import numpy
def UpperCamelCase_ ( a_ , a_ ) ->int:
return b if a == 0 else greatest_common_divisor(b % a , a_ )
class UpperCamelCase__:
"""simple docstring"""
_A = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    _A = numpy.vectorize(lambda x : x % 36 )
    _A = numpy.vectorize(round )
def __init__( self : Dict , snake_case__ : numpy.ndarray ):
"""simple docstring"""
A =self.modulus(snake_case__ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
A =encrypt_key.shape[0]
def _a ( self : Any , snake_case__ : str ):
"""simple docstring"""
return self.key_string.index(snake_case__ )
def _a ( self : Union[str, Any] , snake_case__ : int ):
"""simple docstring"""
return self.key_string[round(snake_case__ )]
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
A =det % len(self.key_string )
A =len(self.key_string )
if greatest_common_divisor(snake_case__ , len(self.key_string ) ) != 1:
A =(
                f'''determinant modular {req_l} of encryption key ({det}) '''
                f'''is not coprime w.r.t. {req_l}.\nTry another key.'''
)
raise ValueError(snake_case__ )
def _a ( self : List[Any] , snake_case__ : str ):
"""simple docstring"""
A =[char for char in text.upper() if char in self.key_string]
A =chars[-1]
while len(snake_case__ ) % self.break_key != 0:
chars.append(snake_case__ )
return "".join(snake_case__ )
def _a ( self : List[Any] , snake_case__ : str ):
"""simple docstring"""
A =self.process_text(text.upper() )
A =""
for i in range(0 , len(snake_case__ ) - self.break_key + 1 , self.break_key ):
A =text[i : i + self.break_key]
A =[self.replace_letters(snake_case__ ) for char in batch]
A =numpy.array([vec] ).T
A =self.modulus(self.encrypt_key.dot(snake_case__ ) ).T.tolist()[
0
]
A ="".join(
self.replace_digits(snake_case__ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def _a ( self : List[Any] ):
"""simple docstring"""
A =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
A =det % len(self.key_string )
A =None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
A =i
break
A =(
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(snake_case__ ) )
def _a ( self : List[Any] , snake_case__ : str ):
"""simple docstring"""
A =self.make_decrypt_key()
A =self.process_text(text.upper() )
A =""
for i in range(0 , len(snake_case__ ) - self.break_key + 1 , self.break_key ):
A =text[i : i + self.break_key]
A =[self.replace_letters(snake_case__ ) for char in batch]
A =numpy.array([vec] ).T
A =self.modulus(decrypt_key.dot(snake_case__ ) ).T.tolist()[0]
A ="".join(
self.replace_digits(snake_case__ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
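# Quick sanity sketch (hedged: assumes the upstream attribute assignments that
# the obfuscated `A =` lines above stand for, and that the rounded float
# inverse is exact for a small key, which holds here since
# det([[2, 5], [1, 6]]) = 7 is coprime to 36):
#     hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     hc.decrypt(hc.encrypt("HELLO"))  # -> "HELLOO" (padded with the last char)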
def UpperCamelCase_ ( ) ->None:
A =int(input("Enter the order of the encryption key: " ) )
A =[]
print("Enter each row of the encryption key with space separated integers" )
for _ in range(a_ ):
A =[int(a_ ) for x in input().split()]
hill_matrix.append(a_ )
A =HillCipher(numpy.array(a_ ) )
print("Would you like to encrypt or decrypt some text? (1 or 2)" )
A =input("\n1. Encrypt\n2. Decrypt\n" )
if option == "1":
A =input("What text would you like to encrypt?: " )
print("Your encrypted text is:" )
print(hc.encrypt(a_ ) )
elif option == "2":
A =input("What text would you like to decrypt?: " )
print("Your decrypted text is:" )
print(hc.decrypt(a_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 689 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"""vocab_file""": """vocab.txt"""}
__a = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__a = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
def UpperCamelCase_ ( a_ ) ->List[Any]:
A =collections.OrderedDict()
with open(a_ , "r" , encoding="utf-8" ) as reader:
A =reader.readlines()
for index, token in enumerate(a_ ):
A =token.rstrip("\n" )
A =index
return vocab
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ):
"""simple docstring"""
A =vocab
A =unk_token
A =max_input_chars_per_word
def _a ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
A =list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
A =0
A =[]
while start < len(snake_case__ ):
A =len(snake_case__ )
A =None
while start < end:
A ="".join(chars[start:end] )
if substr in self.vocab:
A =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(snake_case__ )
A =end
return sub_tokens
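# Greedy longest-match-first illustration: with vocab {"ab", "a"}, tokenizing
# "aba" first consumes the longest prefix "ab", then "a", giving ["ab", "a"];
# a position with no matching vocab prefix emits the unk token and advances by
# one character.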
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ["input_ids", "attention_mask"]
_A = False
def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ):
"""simple docstring"""
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
A =bod_token
A =eod_token
A =load_vocab(snake_case__ )
A =self.encoder[space_token]
A =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
A ={v: k for k, v in self.encoder.items()}
A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _a ( self : Dict ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def _a ( self : Any ):
"""simple docstring"""
return self.encoder["\n"]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : Tuple , snake_case__ : int ):
"""simple docstring"""
A =[]
for x in jieba.cut(snake_case__ , cut_all=snake_case__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
return output_tokens
def _a ( self : List[Any] , snake_case__ : List[Any] , **snake_case__ : str ):
"""simple docstring"""
A =[i for i in token_ids if i >= 0]
A =[
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(snake_case__ , **snake_case__ )
def _a ( self : List[Any] , snake_case__ : int ):
"""simple docstring"""
return token in self.encoder
def _a ( self : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
return "".join(snake_case__ )
def _a ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Dict , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if os.path.isdir(snake_case__ ):
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
A =(filename_prefix + "-" if filename_prefix else "") + save_directory
A =0
if " " in self.encoder:
A =self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
A =self.encoder["\n"]
del self.encoder["\n"]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
A =token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ ))
| 689 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__a = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
    retrieval_batch_size (`int`, *optional*, defaults to 8):
        Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated
        by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
    dataset_split (`str`, *optional*, defaults to `\"train\"`):
        Which split of the `dataset` to load.
    index_name (`str`, *optional*, defaults to `\"compressed\"`):
        The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
        `\"compressed\"`.
    index_path (`str`, *optional*):
        The path to the serialized faiss index on disk.
    passages_path (`str`, *optional*):
        A path to text passages compatible with the faiss index. Required if using
        [`~models.rag.retrieval_rag.LegacyIndex`].
    use_dummy_dataset (`bool`, *optional*, defaults to `False`):
        Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(lowerCAmelCase__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "rag"
_A = True
def __init__( self : Optional[int] , snake_case__ : List[Any]=None , snake_case__ : Union[str, Any]=True , snake_case__ : str=None , snake_case__ : List[Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : str=None , snake_case__ : Optional[Any]=None , snake_case__ : Dict=" / " , snake_case__ : List[Any]=" // " , snake_case__ : Tuple=5 , snake_case__ : Tuple=3_00 , snake_case__ : str=7_68 , snake_case__ : List[Any]=8 , snake_case__ : int="wiki_dpr" , snake_case__ : Optional[Any]="train" , snake_case__ : Tuple="compressed" , snake_case__ : int=None , snake_case__ : Any=None , snake_case__ : int=False , snake_case__ : Union[str, Any]=False , snake_case__ : List[Any]=0.0 , snake_case__ : str=True , snake_case__ : List[Any]=False , snake_case__ : Tuple=False , snake_case__ : str=False , snake_case__ : Any=True , snake_case__ : Optional[Any]=None , **snake_case__ : Optional[int] , ):
"""simple docstring"""
super().__init__(
bos_token_id=snake_case__ , pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , prefix=snake_case__ , vocab_size=snake_case__ , **snake_case__ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
A =kwargs.pop("question_encoder" )
A =question_encoder_config.pop("model_type" )
A =kwargs.pop("generator" )
A =decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
A =AutoConfig.for_model(snake_case__ , **snake_case__ )
A =AutoConfig.for_model(snake_case__ , **snake_case__ )
A =reduce_loss
A =label_smoothing
A =exclude_bos_score
A =do_marginalize
A =title_sep
A =doc_sep
A =n_docs
A =max_combined_length
A =dataset
A =dataset_split
A =index_name
A =retrieval_vector_size
A =retrieval_batch_size
A =passages_path
A =index_path
A =use_dummy_dataset
A =output_retrieved
A =do_deduplication
A =use_cache
if self.forced_eos_token_id is None:
A =getattr(self.generator , "forced_eos_token_id" , snake_case__ )
@classmethod
def _a ( cls : Union[str, Any] , snake_case__ : PretrainedConfig , snake_case__ : PretrainedConfig , **snake_case__ : Dict ):
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **snake_case__ )
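    # Construction sketch (hedged: upstream exposes this classmethod as
    # `from_question_encoder_generator_configs`; a DPR encoder plus a BART
    # generator is the usual pairing):
    #     rag_config = RagConfig.from_question_encoder_generator_configs(
    #         AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base"),
    #         AutoConfig.from_pretrained("facebook/bart-large"),
    #     )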
def _a ( self : List[str] ):
"""simple docstring"""
A =copy.deepcopy(self.__dict__ )
A =self.question_encoder.to_dict()
A =self.generator.to_dict()
A =self.__class__.model_type
return output
| 689 |
def UpperCamelCase_ ( a_ = 6008_5147_5143 ) ->int:
try:
A =int(a_ )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
A =2
A =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
A =i
while n % i == 0:
A =n // i
i += 1
return int(a_ )
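# For the default input, 600851475143 = 71 * 839 * 1471 * 6857, so the largest
# prime factor returned is 6857.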
if __name__ == "__main__":
print(F'''{solution() = }''')
| 689 | 1 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__a = logging.get_logger("""transformers.models.speecht5""")
def UpperCamelCase_ ( a_ , a_ , a_ ) ->Any:
hf_model.apply_weight_norm()
A =checkpoint["input_conv.weight_g"]
A =checkpoint["input_conv.weight_v"]
A =checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
A =checkpoint[f'''upsamples.{i}.1.weight_g''']
A =checkpoint[f'''upsamples.{i}.1.weight_v''']
A =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
A =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
A =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
A =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
A =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
A =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
A =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
A =checkpoint["output_conv.1.weight_g"]
A =checkpoint["output_conv.1.weight_v"]
A =checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
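# Example invocation (illustrative; the script and file names are placeholders):
#     python convert_hifigan_checkpoint.py --checkpoint_path generator.ckpt \
#         --stats_path stats.npy --pytorch_dump_folder_path ./speecht5_hifigan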
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_ , a_ , a_=None , a_=None , ) ->str:
if config_path is not None:
A =SpeechTaHifiGanConfig.from_pretrained(a_ )
else:
A =SpeechTaHifiGanConfig()
A =SpeechTaHifiGan(a_ )
A =torch.load(a_ )
load_weights(orig_checkpoint["model"]["generator"] , a_ , a_ )
A =np.load(a_ )
A =stats[0].reshape(-1 )
A =stats[1].reshape(-1 )
A =torch.from_numpy(a_ ).float()
A =torch.from_numpy(a_ ).float()
model.save_pretrained(a_ )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__a = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 689 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "Wav2Vec2FeatureExtractor"
_A = "AutoTokenizer"
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
A =self.feature_extractor
A =False
@classmethod
def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ):
"""simple docstring"""
try:
return super().from_pretrained(snake_case__ , **snake_case__ )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , snake_case__ , )
A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ )
A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ )
return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ )
def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*snake_case__ , **snake_case__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
A =kwargs.pop("raw_speech" )
else:
A =kwargs.pop("audio" , snake_case__ )
A =kwargs.pop("sampling_rate" , snake_case__ )
A =kwargs.pop("text" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
if text is not None:
A =self.tokenizer(snake_case__ , **snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A =encodings["input_ids"]
return inputs
def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case__ , **snake_case__ )
A =kwargs.pop("input_features" , snake_case__ )
A =kwargs.pop("labels" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if input_features is not None:
A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
if labels is not None:
A =self.tokenizer.pad(snake_case__ , **snake_case__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A =labels["input_ids"]
return input_features
def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@contextmanager
def _a ( self : int ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
A =True
A =self.tokenizer
yield
A =self.feature_extractor
A =False
| 689 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCamelCase_ ( a_ ) ->List[str]:
# vision encoder
if "img_encoder.pos_embed" in name:
A =name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
A =name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
A =name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
A =name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
A =name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
A =name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
A =name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
A =name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
A =name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
A =name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
A =name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
A =name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
A =name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
A =name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
A =name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
A =name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
A =name.replace("c_fc" , "fc1" )
if "c_proj" in name:
A =name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
A =name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
A =name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
A =name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
A =name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
A =name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
A =name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def UpperCamelCase_ ( a_ , a_ ) ->Union[str, Any]:
for key in orig_state_dict.copy().keys():
A =orig_state_dict.pop(a_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A =key.split("." )
A , A =int(key_split[2] ), int(key_split[4] )
A =config.vision_config.hidden_size
if "weight" in key:
A =val[:dim, :]
A =val[dim : dim * 2, :]
A =val[-dim:, :]
else:
A =val[:dim]
A =val[dim : dim * 2]
A =val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A =key.split("." )
A =int(key_split[3] )
A =config.text_config.hidden_size
if "weight" in key:
A =val[:dim, :]
A =val[
dim : dim * 2, :
]
A =val[-dim:, :]
else:
A =val[:dim]
A =val[dim : dim * 2]
A =val[-dim:]
else:
A =rename_key(a_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
A =val.squeeze_()
else:
A =val
return orig_state_dict
def UpperCamelCase_ ( ) ->Dict:
A ="http://images.cocodataset.org/val2017/000000039769.jpg"
A =Image.open(requests.get(a_ , stream=a_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_ , a_="groupvit-gcc-yfcc" , a_=False ) ->List[str]:
A =GroupViTConfig()
A =GroupViTModel(a_ ).eval()
A =torch.load(a_ , map_location="cpu" )["model"]
A =convert_state_dict(a_ , a_ )
A , A =model.load_state_dict(a_ , strict=a_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(a_ ) == 0)
# verify result
A =CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
A =prepare_img()
A =processor(text=["a photo of a cat", "a photo of a dog"] , images=a_ , padding=a_ , return_tensors="pt" )
with torch.no_grad():
A =model(**a_ )
if model_name == "groupvit-gcc-yfcc":
A =torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
A =torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(f'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image , a_ , atol=1E-3 )
processor.save_pretrained(a_ )
model.save_pretrained(a_ )
print("Successfully saved processor and model to" , a_ )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(a_ , organization="nielsr" )
model.push_to_hub(a_ , organization="nielsr" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
__a = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 689 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 689 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__a = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__a = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
__a = """|""".join(sys.argv[1:])
__a = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__a = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 689 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCamelCase_ ( ) ->Union[str, Any]:
A =ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=a_ , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=a_ , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=a_ )
return parser.parse_args()
def UpperCamelCase_ ( ) ->List[Any]:
A =parse_args()
# Import training_script as a module.
A =Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
A =script_fpath.stem
A =importlib.import_module(a_ )
# Patch sys.argv
A =[args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 689 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__a = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , a_ ) ->List[Any]:
for attribute in key.split("." ):
A =getattr(a_ , a_ )
if weight_type is not None:
A =getattr(a_ , a_ ).shape
else:
A =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A =value
elif weight_type == "weight_g":
A =value
elif weight_type == "weight_v":
A =value
elif weight_type == "bias":
A =value
else:
A =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCamelCase_ ( a_ , a_ ) ->Optional[Any]:
A =[]
A =fairseq_model.state_dict()
A =hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A =False
if "conv_layers" in name:
load_conv_layer(
a_ , a_ , a_ , a_ , hf_model.config.feat_extract_norm == "group" , )
A =True
else:
for key, mapped_key in MAPPING.items():
A ="unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
A =True
if "*" in mapped_key:
A =name.split(a_ )[0].split("." )[-2]
A =mapped_key.replace("*" , a_ )
if "weight_g" in name:
A ="weight_g"
elif "weight_v" in name:
A ="weight_v"
elif "bias" in name:
A ="bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A ="weight"
else:
A =None
set_recursively(a_ , a_ , a_ , a_ , a_ )
continue
if not is_used:
unused_weights.append(a_ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , a_ ) ->Optional[int]:
A =full_name.split("conv_layers." )[-1]
A =name.split("." )
A =int(items[0] )
A =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
A =value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
A =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(a_ )
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_ , a_=None , a_=None , a_=True ) ->Optional[int]:
if config_path is not None:
A =UniSpeechSatConfig.from_pretrained(a_ )
else:
A =UniSpeechSatConfig()
A =""
if is_finetuned:
A =UniSpeechSatForCTC(a_ )
else:
A =UniSpeechSatForPreTraining(a_ )
A , A , A =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
A =model[0].eval()
recursively_load_weights(a_ , a_ )
hf_wavavec.save_pretrained(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__a = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 689 |
def UpperCamelCase_ ( a_ , a_ ) ->int:
return int((input_a, input_a).count(0 ) != 0 )
def UpperCamelCase_ ( ) ->None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 689 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 |
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
def count_of_possible_combinations(a_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(a_ )
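# The plain recursion above revisits the same sub-targets repeatedly (exponential time);
# the memoized and bottom-up variants below reduce this to O(len(array) * target).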
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
def count_of_possible_combinations_with_dp_array(
a_ , a_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
A =sum(
count_of_possible_combinations_with_dp_array(target - item , a_ )
for item in array )
A =answer
return answer
A =[-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(a_ , a_ )
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
A =[0] * (target + 1)
A =1
for i in range(1 , target + 1 ):
for j in range(a_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
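# Worked example: with array=[1, 2, 5] and target=5 there are 9 ordered combinations:
# 1+1+1+1+1; 1+1+1+2 in four orders; 1+2+2 in three orders; and 5 itself.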
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = 3
__a = 5
__a = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 689 | 1 |
from __future__ import annotations
def UpperCamelCase_ ( a_ ) ->None:
create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] )
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , ) ->None:
if index == len(a_ ):
print(a_ )
return
for i in range(len(a_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
A =True
create_state_space_tree(a_ , a_ , index + 1 , a_ )
current_sequence.pop()
A =False
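# Example: for the sequence [3, 1, 2, 4] this prints all 4! = 24 orderings, one per line.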
__a = [3, 1, 2, 4]
generate_all_permutations(sequence)
__a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 689 |
from __future__ import annotations
import math
def UpperCamelCase_ ( a_ , a_ ) ->float:
A =u
for i in range(1 , a_ ):
A =temp * (u - i)
return temp
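# ucal returns the falling-factorial factor u*(u-1)*...*(u-p+1) that multiplies the
# p-th forward difference in Newton's forward interpolation formula.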
def UpperCamelCase_ ( ) ->None:
A =int(input("enter the numbers of values: " ) )
A =[]
for _ in range(a_ ):
y.append([] )
for i in range(a_ ):
for j in range(a_ ):
y[i].append(a_ )
A =0
print("enter the values of parameters in a list: " )
A =list(map(a_ , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(a_ ):
A =float(input() )
A =int(input("enter the value to interpolate: " ) )
A =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , a_ ):
for j in range(n - i ):
A =y[j + 1][i - 1] - y[j][i - 1]
A =y[0][0]
for i in range(1 , a_ ):
summ += (ucal(a_ , a_ ) * y[0][i]) / math.factorial(a_ )
print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
| 689 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = BertJapaneseTokenizer
_A = False
_A = True
def _a ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
A =[
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _a ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
A ="こんにちは、世界。 \nこんばんは、世界。"
A ="こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def _a ( self : Union[str, Any] , snake_case__ : Tuple ):
"""simple docstring"""
A , A =self.get_input_output_texts(snake_case__ )
A =tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
A =tokenizer.decode(snake_case__ , clean_up_tokenization_spaces=snake_case__ )
return text, ids
def _a ( self : int ):
"""simple docstring"""
pass # TODO add if relevant
def _a ( self : List[Any] ):
"""simple docstring"""
pass # TODO add if relevant
def _a ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def _a ( self : Optional[int] ):
"""simple docstring"""
A =self.tokenizer_class(self.vocab_file )
A =tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(snake_case__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def _a ( self : Tuple ):
"""simple docstring"""
A =self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(snake_case__ )
A ="こんにちは、世界。\nこんばんは、世界。"
A =tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
A =os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(snake_case__ , "wb" ) as handle:
pickle.dump(snake_case__ , snake_case__ )
with open(snake_case__ , "rb" ) as handle:
A =pickle.load(snake_case__ )
A =tokenizer_new.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def _a ( self : Optional[int] ):
"""simple docstring"""
A =MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _a ( self : str ):
"""simple docstring"""
try:
A =MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
try:
A =MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _a ( self : Optional[int] ):
"""simple docstring"""
A =MecabTokenizer(do_lower_case=snake_case__ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def _a ( self : List[str] ):
"""simple docstring"""
try:
A =MecabTokenizer(
do_lower_case=snake_case__ , normalize_text=snake_case__ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def _a ( self : str ):
"""simple docstring"""
A =MecabTokenizer(normalize_text=snake_case__ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def _a ( self : Tuple ):
"""simple docstring"""
A =self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(snake_case__ )
A ="こんにちは、世界。\nこんばんは、世界。"
A =tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
A =os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(snake_case__ , "wb" ) as handle:
pickle.dump(snake_case__ , snake_case__ )
with open(snake_case__ , "rb" ) as handle:
A =pickle.load(snake_case__ )
A =tokenizer_new.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
@require_sudachi
def _a ( self : Optional[int] ):
"""simple docstring"""
A =SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _a ( self : Any ):
"""simple docstring"""
A =SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def _a ( self : str ):
"""simple docstring"""
A =SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def _a ( self : List[Any] ):
"""simple docstring"""
A =SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def _a ( self : Any ):
"""simple docstring"""
A =SudachiTokenizer(do_lower_case=snake_case__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def _a ( self : Optional[int] ):
"""simple docstring"""
A =SudachiTokenizer(normalize_text=snake_case__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def _a ( self : Optional[int] ):
"""simple docstring"""
A =SudachiTokenizer(trim_whitespace=snake_case__ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def _a ( self : List[str] ):
"""simple docstring"""
A =self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(snake_case__ )
A ="こんにちは、世界。\nこんばんは、世界。"
A =tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
A =os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(snake_case__ , "wb" ) as handle:
pickle.dump(snake_case__ , snake_case__ )
with open(snake_case__ , "rb" ) as handle:
A =pickle.load(snake_case__ )
A =tokenizer_new.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
@require_jumanpp
def _a ( self : List[Any] ):
"""simple docstring"""
A =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _a ( self : int ):
"""simple docstring"""
A =JumanppTokenizer(do_lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _a ( self : Tuple ):
"""simple docstring"""
A =JumanppTokenizer(normalize_text=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def _a ( self : Any ):
"""simple docstring"""
A =JumanppTokenizer(trim_whitespace=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def _a ( self : List[str] ):
"""simple docstring"""
A =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def _a ( self : List[Any] ):
"""simple docstring"""
A =["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
A ={}
for i, token in enumerate(snake_case__ ):
A =i
A =WordpieceTokenizer(vocab=snake_case__ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def _a ( self : str ):
"""simple docstring"""
A =BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
A =tokenizer.subword_tokenizer
A =subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(snake_case__ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
A =subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(snake_case__ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
A =tokenizer.encode("ありがとう。" , add_special_tokens=snake_case__ )
A =tokenizer.encode("どういたしまして。" , add_special_tokens=snake_case__ )
A =tokenizer.build_inputs_with_special_tokens(snake_case__ )
A =tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = BertJapaneseTokenizer
_A = False
def _a ( self : List[str] ):
"""simple docstring"""
super().setUp()
A =["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _a ( self : str , **snake_case__ : Optional[int] ):
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **snake_case__ )
def _a ( self : Dict , snake_case__ : int ):
"""simple docstring"""
A ="こんにちは、世界。 \nこんばんは、世界。"
A ="こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def _a ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def _a ( self : Optional[int] ):
"""simple docstring"""
pass # TODO add if relevant
def _a ( self : int ):
"""simple docstring"""
pass # TODO add if relevant
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
A =tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
snake_case__ , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def _a ( self : Any ):
"""simple docstring"""
A =["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
A ={}
for i, token in enumerate(snake_case__ ):
A =i
A =CharacterTokenizer(vocab=snake_case__ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
A =tokenizer.encode("ありがとう。" , add_special_tokens=snake_case__ )
A =tokenizer.encode("どういたしまして。" , add_special_tokens=snake_case__ )
A =tokenizer.build_inputs_with_special_tokens(snake_case__ )
A =tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
A ="cl-tohoku/bert-base-japanese"
A =AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
A ="cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertTokenizer.from_pretrained(snake_case__ )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
A ="bert-base-cased"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(snake_case__ )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 689 |
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCamelCase_ ( a_ ) ->Any:
    # get the image's height and width
A , A =img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(a_ ):
for j in range(a_ ):
A =[255, 255, 255] - img[i][j]
return img
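# Illustration: a pixel [10, 20, 30] becomes [245, 235, 225] (each channel is 255 - value).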
if __name__ == "__main__":
# read original image
__a = imread("""image_data/lena.jpg""", 1)
# convert to its negative
__a = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 689 | 1 |
from scipy.stats import spearmanr
import datasets
__a = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
__a = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
__a = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def _a ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any]=False ):
"""simple docstring"""
A =spearmanr(snake_case__ , snake_case__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 689 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
__a = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
__a = {
"""ctrl""": 2_5_6,
}
__a = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def UpperCamelCase_ ( a_ ) ->List[str]:
A =set()
A =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A =char
A =set(a_ )
return pairs
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = CONTROL_CODES
def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ):
"""simple docstring"""
super().__init__(unk_token=snake_case__ , **snake_case__ )
with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
A =json.load(snake_case__ )
A ={v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
A =merges_handle.read().split("\n" )[1:-1]
A =[tuple(merge.split() ) for merge in merges]
A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
A ={}
@property
def _a ( self : str ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : List[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : int , snake_case__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
A =tuple(snake_case__ )
A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
A =get_pairs(snake_case__ )
if not pairs:
return token
while True:
A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
A , A =bigram
A =[]
A =0
while i < len(snake_case__ ):
try:
A =word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A =j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A =tuple(snake_case__ )
A =new_word
if len(snake_case__ ) == 1:
break
else:
A =get_pairs(snake_case__ )
A ="@@ ".join(snake_case__ )
A =word[:-4]
A =word
return word
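    # bpe() repeatedly merges the adjacent symbol pair with the lowest (earliest-learned)
    # merge rank until none remains, then joins subwords with the "@@ " continuation marker.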
def _a ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
A =[]
A =re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def _a ( self : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
A =" ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
A =0
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : snake_case__[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
A =token_index
writer.write(" ".join(snake_case__ ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 689 | 1 |
import argparse
import copy
def UpperCamelCase_ ( a_ ) ->Tuple:
A ={}
with open(SCREAMING_SNAKE_CASE_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
A =[]
_list.append([line.split()[1], line.split()[2]] )
A =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
A =[]
_list.append([line.split()[0], line.split()[2]] )
A =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def UpperCamelCase_ ( a_ , a_ ) ->int:
with open(SCREAMING_SNAKE_CASE_ ) as f:
A =f.read(1 )
A =start_node
A =[]
A =start_node
A =0
while visiting not in first_solution:
A =1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(SCREAMING_SNAKE_CASE_ ) and k[0] not in first_solution:
A =k[1]
A =k[0]
first_solution.append(SCREAMING_SNAKE_CASE_ )
A =distance_of_first_solution + int(SCREAMING_SNAKE_CASE_ )
A =best_node
first_solution.append(SCREAMING_SNAKE_CASE_ )
A =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
A =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
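# find_neighborhood below enumerates candidate tours by swapping every pair of interior
# nodes in the current solution (a 2-exchange move) and recomputing each tour's length.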
def UpperCamelCase_ ( a_ , a_ ) ->Any:
A =[]
for n in solution[1:-1]:
A =solution.index(SCREAMING_SNAKE_CASE_ )
for kn in solution[1:-1]:
A =solution.index(SCREAMING_SNAKE_CASE_ )
if n == kn:
continue
A =copy.deepcopy(SCREAMING_SNAKE_CASE_ )
A =kn
A =n
A =0
for k in _tmp[:-1]:
A =_tmp[_tmp.index(SCREAMING_SNAKE_CASE_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
A =distance + int(i[1] )
_tmp.append(SCREAMING_SNAKE_CASE_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
A =len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size) -> tuple:
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None) -> None:
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Tabu Search""")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
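# --- Added usage sketch (file name and contents are hypothetical) ---
# The script expects an edge list with one "node_a node_b distance" triple per line,
# using single-character node names, since generate_first_solution reads the start
# node with f.read(1). For example, with tabu_data.txt containing:
#
#   a b 20
#   a c 18
#   b c 10
#
# the run would be: python tabu_search.py -f tabu_data.txt -i 4 -s 3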
| 700 |
def find_minimum_change(denominations, value) -> list[int]:
    total_value = int(value)
    # Initialize result
    answer = []
    # Traverse denominations from largest to smallest
    for denomination in reversed(denominations):
        # Take this denomination while it still fits
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the coin to the answer
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
        == "y"
    ):
        n = int(input("""Enter the number of denominations you want to add: """).strip())
        for i in range(0, n):
            denominations.append(int(input(F'''Denomination {i}: ''').strip()))
        value = input("""Enter the change you want to make in Indian Currency: """).strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("""Enter the change you want to make: """).strip()
    if int(value) == 0 or int(value) < 0:
        print("""The total value cannot be zero or negative.""")
    else:
        print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=""" """)
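# --- Added worked example (values chosen for the default denominations) ---
# find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987)
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]: the greedy pass repeatedly
# takes the largest denomination that still fits until the remaining value is zero.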
| 689 | 0 |
def combination_sum_iv(n, array, target) -> int:
    def count_of_possible_combinations(target) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)
def combination_sum_iv_with_dp_array(n, array, target) -> int:
    def count_of_possible_combinations_with_dp_array(
        target, dp_array) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n, array, target) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
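# --- Added worked check ---
# With array [1, 2, 5] and target 5, the ordered combinations are (1,1,1,1,1),
# the four arrangements of (1,1,1,2), the three arrangements of (1,2,2), and (5,),
# so all three implementations above print 9.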
| 701 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["""model.decoder.embed_positions.weights"""]
def rename_keys(name) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict, hidden_size) ->Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint) ->MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu") ->None:
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
    if len(unexpected_keys) > 0:
        raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
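# --- Added usage sketch (script name and output directory are hypothetical) ---
#   python convert_musicgen.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small-hf --device cpu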
| 689 | 0 |
'''simple docstring'''
def count_divisors(n) ->int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution() ->int:
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
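# --- Added worked example ---
# count_divisors counts divisors via the prime factorisation: 28 = 2^2 * 7 has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28), making 28 the first
# triangle number with more than five divisors -- the Project Euler 12 example
# this solution generalises to "more than 500".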
| 702 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir) ->None:
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_filename(tmpdir) ->None:
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
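# --- Added usage sketch (lock path is hypothetical) ---
# FileLock is a context manager, so the pattern the first test exercises is simply:
#
# with FileLock("/tmp/foo.lock"):
#     ...  # critical section; a concurrent acquire() blocks or raises Timeout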
| 689 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
"""simple docstring"""
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        """simple docstring"""
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )
    def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        config.add_cross_attention = True
        model = OpenLlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_open_llama_sequence_classification_model( self ):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label( self ):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label( self ):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
    def test_save_load_fast_init_from_base( self ):
"""simple docstring"""
pass
    @parameterized.expand([("linear",), ("dynamic",)] )
    def test_model_rope_scaling( self , scaling_type ):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
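# --- Added usage sketch (illustrates the `rope_scaling` entry the last test drives) ---
# config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 10.0})
# model = OpenLlamaModel(config)  # RoPE positions are rescaled by 1/10 at run time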
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
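# --- Added usage sketch ---
# Thanks to the _LazyModule indirection above, a heavy backend is imported only
# when its symbol is first touched, e.g.:
#
# from transformers.models.roformer import RoFormerConfig   # cheap, config only
# from transformers.models.roformer import RoFormerModel    # triggers the torch import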
| 689 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 704 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute( self , predictions , references , char_order: int = CHRF.CHAR_ORDER , word_order: int = CHRF.WORD_ORDER , beta: int = CHRF.BETA , lowercase: bool = False , whitespace: bool = False , eps_smoothing: bool = False , ):
        """simple docstring"""
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("Sacrebleu requires the same number of references for each prediction" )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_chrf = CHRF(char_order , word_order , beta , lowercase , whitespace , eps_smoothing )
        output = sb_chrf.corpus_score(predictions , transformed_references )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
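# --- Added sketch of the expected reference layout (example strings are invented) ---
# predictions = ["pred one", "pred two"]
# references = [["ref for pred one"], ["ref for pred two"]]   # one sub-list per prediction
# _compute transposes this into the sacrebleu layout
# [["ref for pred one", "ref for pred two"]] before scoring.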
| 689 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "wavlm"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
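# --- Added usage sketch ---
# config = WavLMConfig()
# config.inputs_to_logits_ratio  # 320 == 5*2*2*2*2*2*2, the total stride of the conv feature extractor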
| 705 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key , default=False ) ->bool:
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env("""RUN_SLOW""", default=False)
def skip(test_case ):
    return unittest.skip("Test was skipped" )(test_case )
def slow(test_case ):
    return unittest.skipUnless(_run_slow_tests , "test is slow" )(test_case )
def require_cpu(test_case ):
    return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(test_case )
def require_cuda(test_case ):
    return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(test_case )
def require_xpu(test_case ):
    return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(test_case )
def require_mps(test_case ):
    return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(test_case )
def require_huggingface_suite(test_case ):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(test_case )
def require_bnb(test_case ):
    return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(test_case )
def require_tpu(test_case ):
    return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(test_case )
def require_single_gpu(test_case ):
    return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(test_case )
def require_single_xpu(test_case ):
    return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(test_case )
def require_multi_gpu(test_case ):
    return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(test_case )
def require_multi_xpu(test_case ):
    return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(test_case )
def require_safetensors(test_case ):
    return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(test_case )
def require_deepspeed(test_case ):
    return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(test_case )
def require_fsdp(test_case ):
    return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(test_case )
def require_torch_min_version(test_case=None , version=None ):
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version(">=" , version ) , f'''test requires torch version >= {version}''' )(test_case )
def require_tensorboard(test_case ):
    return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(test_case )
def require_wandb(test_case ):
    return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(test_case )
def require_comet_ml(test_case ):
    return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(test_case )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case ):
    return unittest.skipUnless(
        _atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(test_case )
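# --- Added usage sketch (test body is hypothetical) ---
# @require_cuda
# def test_runs_on_gpu():
#     assert torch.cuda.is_available()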
class TempDirTestCase(unittest.TestCase ):
    """simple docstring"""
    clear_on_setup = True
    @classmethod
    def setUpClass( cls ):
        """simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass( cls ):
        """simple docstring"""
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp( self ):
        """simple docstring"""
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob("**/*" ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase(unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase ):
    """simple docstring"""
    def add_mocks( self , mocks: Union[mock.Mock, List[mock.Mock]] ):
        """simple docstring"""
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors(tensor ) ->bool:
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    """simple docstring"""
    def __init__( self , returncode , stdout , stderr ):
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) ->_RunOutput:
    if echo:
        print("\nRunning: " , " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label="stdout:" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label="stderr:" ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) ->_RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    return result
class SubprocessCallException(Exception ):
    """simple docstring"""
    pass
def run_command(command , return_stdout=False ):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , "decode" ):
                output = output.decode("utf-8" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'''Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}''' ) from e | 706 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
"""simple docstring"""
    def __init__( self , parent , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = TFFlaubertModel(config=config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = TFFlaubertForSequenceClassification(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_for_token_classification( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_flaubert_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_qa( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_flaubert_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )
    def test_flaubert_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@slow
    def test_output_embeds_base_model( self ):
        """simple docstring"""
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
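# --- Added usage sketch (variables are illustrative, mirroring the tests above) ---
# Flaubert-style models accept explicit language ids and true sequence lengths:
#
# inputs = {"input_ids": input_ids, "langs": token_type_ids, "lengths": input_lengths}
# output = model(inputs)[0]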
| 689 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase__( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_A = TextToVideoSDPipeline
_A = TEXT_TO_IMAGE_PARAMS
_A = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
_A = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
A =DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
A =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
A =CLIPTextModel(_lowercase )
A =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
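    # Fast-test convention used above: every sub-model is deliberately tiny
    # (32-channel first blocks in the UNet/VAE, a 5-layer CLIP text encoder
    # with hidden size 32, 32x32 samples) so the assembled pipeline can run a
    # 2-step CPU generation in the unit test below without real weights.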
def _a ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : Any=0 ):
"""simple docstring"""
if str(_lowercase ).startswith("mps" ):
A =torch.manual_seed(_lowercase )
else:
A =torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def _a ( self : List[Any] ):
"""simple docstring"""
A ='cpu' # ensure determinism for the device-dependent torch.Generator
A =self.get_dummy_components()
A =TextToVideoSDPipeline(**_lowercase )
A =sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
A =self.get_dummy_inputs(_lowercase )
A ='np'
A =sd_pipe(**_lowercase ).frames
A =frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A =np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self : List[Any] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_lowercase , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_lowercase , expected_max_diff=1E-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _a ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _a ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : int ):
"""simple docstring"""
A =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
A =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
A =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A =pipe.to("cuda" )
A ='Spiderman is surfing'
A =torch.Generator(device="cpu" ).manual_seed(0 )
A =pipe(_lowercase , generator=_lowercase , num_inference_steps=25 , output_type="pt" ).frames
A =video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def _a ( self : str ):
"""simple docstring"""
A =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
A =TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
A =pipe.to("cuda" )
A ='Spiderman is surfing'
A =torch.Generator(device="cpu" ).manual_seed(0 )
A =pipe(_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="pt" ).frames
A =video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 707 |
from __future__ import annotations
def UpperCamelCase_ ( a_ ) ->None:
create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] )
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , ) ->None:
if index == len(a_ ):
print(a_ )
return
for i in range(len(a_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
A =True
create_state_space_tree(a_ , a_ , index + 1 , a_ )
current_sequence.pop()
A =False
__a = [3, 1, 2, 4]
generate_all_permutations(sequence)
__a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
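# Expected console output for `sequence_a` (assuming `generate_all_permutations`
# is the public name bound to the first helper above): the loop always tries
# unused indices in ascending order, so the six permutations print as
# ['A', 'B', 'C'], ['A', 'C', 'B'], ['B', 'A', 'C'],
# ['B', 'C', 'A'], ['C', 'A', 'B'], ['C', 'B', 'A'].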
| 689 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCamelCase__( __A ):
"""simple docstring"""
_A = "sew-d"
def __init__( self : Optional[Any] , snake_case__ : Any=32 , snake_case__ : str=7_68 , snake_case__ : List[Any]=12 , snake_case__ : List[str]=12 , snake_case__ : List[Any]=30_72 , snake_case__ : Optional[int]=2 , snake_case__ : List[str]=5_12 , snake_case__ : str=2_56 , snake_case__ : Any=True , snake_case__ : int=True , snake_case__ : str=("p2c", "c2p") , snake_case__ : Optional[Any]="layer_norm" , snake_case__ : Dict="gelu_python" , snake_case__ : Optional[Any]=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[Any]=0.0 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : str=0.02 , snake_case__ : List[str]=1E-7 , snake_case__ : Dict=1E-5 , snake_case__ : Optional[Any]="group" , snake_case__ : str="gelu" , snake_case__ : str=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , snake_case__ : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case__ : int=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case__ : Union[str, Any]=False , snake_case__ : Dict=1_28 , snake_case__ : Union[str, Any]=16 , snake_case__ : int=True , snake_case__ : List[str]=0.05 , snake_case__ : Tuple=10 , snake_case__ : Any=2 , snake_case__ : Dict=0.0 , snake_case__ : Tuple=10 , snake_case__ : List[str]=0 , snake_case__ : Optional[Any]="mean" , snake_case__ : Any=False , snake_case__ : int=False , snake_case__ : List[str]=2_56 , snake_case__ : str=0 , snake_case__ : List[str]=1 , snake_case__ : Tuple=2 , **snake_case__ : int , ):
"""simple docstring"""
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
A =hidden_size
A =feat_extract_norm
A =feat_extract_activation
A =list(snake_case__ )
A =list(snake_case__ )
A =list(snake_case__ )
A =conv_bias
A =num_conv_pos_embeddings
A =num_conv_pos_embedding_groups
A =len(self.conv_dim )
A =num_hidden_layers
A =intermediate_size
A =squeeze_factor
A =max_position_embeddings
A =position_buckets
A =share_att_key
A =relative_attention
A =norm_rel_ebd
A =list(snake_case__ )
A =hidden_act
A =num_attention_heads
A =hidden_dropout
A =attention_dropout
A =activation_dropout
A =feat_proj_dropout
A =final_dropout
A =layer_norm_eps
A =feature_layer_norm_eps
A =initializer_range
A =vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A =apply_spec_augment
A =mask_time_prob
A =mask_time_length
A =mask_time_min_masks
A =mask_feature_prob
A =mask_feature_length
A =mask_feature_min_masks
# ctc loss
A =ctc_loss_reduction
A =ctc_zero_infinity
# sequence classification
A =use_weighted_layer_sum
A =classifier_proj_size
@property
def _a ( self : int ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
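# Minimal standalone check of the reduction performed by the property above,
# using the default `conv_stride` from `__init__` (the variable name below is
# illustrative, not part of the class):
_demo_conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul , _demo_conv_stride , 1 ) == 3_20  # one output frame per 320 input samples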
| 708 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
A =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def _a ( self : int ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A =DDPMScheduler()
A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 )
A =output.audios[0]
A =output.images[0]
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
A =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A =DDIMScheduler()
A =self.dummy_vqvae_and_unet
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
A =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A =self.dummy_unet_condition
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =torch.rand((1, 1, 10) )
A =pipe(generator=snake_case__ , encoding=snake_case__ )
A =output.images[0]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
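# Length bookkeeping behind the shape assertions above: a mel spectrogram with
# W time frames reconstructs to (W - 1) * hop_length audio samples, hence every
# audio-shape check of the form (sample_size[1] - 1) * mel.hop_length.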
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =torch_device
A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ )
A =output.audios[0]
A =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 689 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def UpperCamelCase_ ( a_ , a_ , a_ = 1_6000 ) ->List[Any]:
A =int(round(sample_rate * max_length ) )
if len(__lowerCAmelCase ) <= sample_length:
return wav
A =randint(0 , len(__lowerCAmelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
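# Usage sketch for the helper above (illustrative numbers): with a 5 s clip at
# 16 kHz and max_length=1, a random contiguous 16 000-sample window is
# returned; clips already shorter than the window are returned unchanged.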
@dataclass
class UpperCamelCase__:
"""simple docstring"""
_A = field(default=lowerCAmelCase__ , metadata={"help": "Name of a dataset from the datasets package"} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "A file containing the training audio paths and labels."} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "A file containing the validation audio paths and labels."} )
_A = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to \'train\'"
} , )
_A = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to \'validation\'"
)
} , )
_A = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to \'audio\'"} , )
_A = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to \'label\'"} )
_A = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_A = field(
default=lowerCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
_A = field(
default=2_0 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class UpperCamelCase__:
"""simple docstring"""
_A = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
_A = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
_A = field(
default=lowerCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
_A = field(
default=lowerCAmelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def _a ( self : Tuple ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , _lowerCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def UpperCamelCase_ ( ) ->Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A , A , A =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A , A , A =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , __lowerCAmelCase , __lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A =training_args.get_process_log_level()
logger.setLevel(__lowerCAmelCase )
transformers.utils.logging.set_verbosity(__lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
A =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
A =DatasetDict()
A =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
A =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--label_column_name` to the correct text column - one of "
f'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
A =AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
A =raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
A =feature_extractor.model_input_names[0]
def train_transforms(a_ ):
A =[]
for audio in batch[data_args.audio_column_name]:
A =random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__lowerCAmelCase )
A =feature_extractor(__lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
A ={model_input_name: inputs.get(__lowerCAmelCase )}
A =list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(a_ ):
A =[audio["array"] for audio in batch[data_args.audio_column_name]]
A =feature_extractor(__lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
A ={model_input_name: inputs.get(__lowerCAmelCase )}
A =list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
A =raw_datasets["train"].features[data_args.label_column_name].names
A , A ={}, {}
for i, label in enumerate(__lowerCAmelCase ):
A =str(__lowerCAmelCase )
A =label
# Load the accuracy metric from the datasets package
A =evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(a_ ):
A =np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=__lowerCAmelCase , references=eval_pred.label_ids )
A =AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel=__lowerCAmelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A =AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
A =(
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__lowerCAmelCase , output_all_columns=__lowerCAmelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
A =(
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__lowerCAmelCase , output_all_columns=__lowerCAmelCase )
# Initialize our trainer
A =Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , )
# Training
if training_args.do_train:
A =None
if training_args.resume_from_checkpoint is not None:
A =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A =last_checkpoint
A =trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A =trainer.evaluate()
trainer.log_metrics("eval" , __lowerCAmelCase )
trainer.save_metrics("eval" , __lowerCAmelCase )
# Write model card and (optionally) push to hub
A ={
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCAmelCase )
else:
trainer.create_model_card(**__lowerCAmelCase )
if __name__ == "__main__":
main()
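# Hypothetical invocation sketch (dataset/model names are illustrative, not
# verified against the hub):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval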
| 709 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A =find_backend(" if not is_torch_available():" )
self.assertEqual(snake_case__ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A =find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A =find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers_and_onnx" )
def _a ( self : List[Any] ):
"""simple docstring"""
A =read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , snake_case__ )
self.assertIn("torch_and_transformers" , snake_case__ )
self.assertIn("flax_and_transformers" , snake_case__ )
self.assertIn("torch_and_transformers_and_onnx" , snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def _a ( self : Dict ):
"""simple docstring"""
A =create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(snake_case__ , "\nCONSTANT = None\n" )
A =create_dummy_object("function" , "'torch'" )
self.assertEqual(
snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
A =create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(snake_case__ , snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , snake_case__ )
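# Naming convention exercised above: `find_backend` collects every
# `is_<backend>_available()` call inside an `if not (...):` guard and joins the
# backend names with "_and_", so a guard over torch, transformers and onnx
# yields the single registry key "torch_and_transformers_and_onnx".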
| 689 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = 42
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : int , snake_case__ : Any = 32 , snake_case__ : Union[str, Any] = 64 , snake_case__ : Optional[int] = 20 , snake_case__ : Any = 7_68 , snake_case__ : Tuple=77 , snake_case__ : Dict=4 , snake_case__ : int = 0.0 , snake_case__ : int = "silu" , snake_case__ : List[Any] = None , snake_case__ : Optional[Any] = None , snake_case__ : Union[str, Any] = "linear" , snake_case__ : Any = "prd" , snake_case__ : List[Any] = None , snake_case__ : Optional[Any] = None , snake_case__ : Union[str, Any] = None , ):
"""simple docstring"""
super().__init__()
A =num_attention_heads
A =attention_head_dim
A =num_attention_heads * attention_head_dim
A =additional_embeddings
A =time_embed_dim or inner_dim
A =embedding_proj_dim or embedding_dim
A =clip_embed_dim or embedding_dim
A =Timesteps(UpperCamelCase_ , UpperCamelCase_ , 0 )
A =TimestepEmbedding(UpperCamelCase_ , UpperCamelCase_ , out_dim=UpperCamelCase_ , act_fn=UpperCamelCase_ )
A =nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
if embedding_proj_norm_type is None:
A =None
elif embedding_proj_norm_type == "layer":
A =nn.LayerNorm(UpperCamelCase_ )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
A =nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
if encoder_hid_proj_type is None:
A =None
elif encoder_hid_proj_type == "linear":
A =nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
A =nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase_ ) )
if added_emb_type == "prd":
A =nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase_ ) )
elif added_emb_type is None:
A =None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
A =nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dropout=UpperCamelCase_ , activation_fn="gelu" , attention_bias=UpperCamelCase_ , )
for d in range(UpperCamelCase_ )
] )
if norm_in_type == "layer":
A =nn.LayerNorm(UpperCamelCase_ )
elif norm_in_type is None:
A =None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
A =nn.LayerNorm(UpperCamelCase_ )
A =nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
A =torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
A =causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , UpperCamelCase_ , persistent=UpperCamelCase_ )
A =nn.Parameter(torch.zeros(1 , UpperCamelCase_ ) )
A =nn.Parameter(torch.zeros(1 , UpperCamelCase_ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _a ( self : Dict ):
"""simple docstring"""
A ={}
def fn_recursive_add_processors(snake_case__ : str , snake_case__ : List[str] , snake_case__ : Tuple ):
if hasattr(UpperCamelCase_ , "set_processor" ):
A =module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , UpperCamelCase_ , UpperCamelCase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return processors
def _a ( self : Optional[int] , snake_case__ : Optional[int] ):
"""simple docstring"""
A =len(self.attn_processors.keys() )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(UpperCamelCase_ )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
if hasattr(UpperCamelCase_ , "set_processor" ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
module.set_processor(UpperCamelCase_ )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , UpperCamelCase_ , UpperCamelCase_ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
def _a ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : int = None , snake_case__ : Any = None , snake_case__ : Optional[int] = True , ):
"""simple docstring"""
A =hidden_states.shape[0]
A =timestep
if not torch.is_tensor(UpperCamelCase_ ):
A =torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase_ ) and len(timesteps.shape ) == 0:
A =timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A =timesteps * torch.ones(UpperCamelCase_ , dtype=timesteps.dtype , device=timesteps.device )
A =self.time_proj(UpperCamelCase_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
A =timesteps_projected.to(dtype=self.dtype )
A =self.time_embedding(UpperCamelCase_ )
if self.embedding_proj_norm is not None:
A =self.embedding_proj_norm(UpperCamelCase_ )
A =self.embedding_proj(UpperCamelCase_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
A =self.encoder_hidden_states_proj(UpperCamelCase_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
A =self.proj_in(UpperCamelCase_ )
A =self.positional_embedding.to(hidden_states.dtype )
A =[]
A =0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
A =proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
A =hidden_states[:, None, :]
A =additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
A =self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase_ , -1 , -1 )
additional_embeds.append(UpperCamelCase_ )
A =torch.cat(
UpperCamelCase_ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
A =additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
A =F.pad(
UpperCamelCase_ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
A =hidden_states + positional_embeddings
if attention_mask is not None:
A =(1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
A =F.pad(UpperCamelCase_ , (0, self.additional_embeddings) , value=0.0 )
A =(attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
A =attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
A =self.norm_in(UpperCamelCase_ )
for block in self.transformer_blocks:
A =block(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
A =self.norm_out(UpperCamelCase_ )
if self.prd_embedding is not None:
A =hidden_states[:, -1]
else:
A =hidden_states[:, additional_embeddings_len:]
A =self.proj_to_clip_embeddings(UpperCamelCase_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase_ )
def _a ( self : Optional[int] , snake_case__ : List[Any] ):
"""simple docstring"""
A =(prior_latents * self.clip_std) + self.clip_mean
return prior_latents
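# The post-processing method above inverts the CLIP-statistics normalisation:
# latents trained as (x - clip_mean) / clip_std are mapped back to CLIP image
# embedding space via x * clip_std + clip_mean.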
| 710 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__:
"""simple docstring"""
_A = 42
_A = None
_A = None
__a = namedtuple("""CoinsDistribResult""", """moves excess""")
def UpperCamelCase_ ( a_ ) ->int:
if root is None:
return 0
# Validation
def count_nodes(a_ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(a_ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(a_ ) != count_coins(a_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(a_ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
A , A =get_distrib(node.left )
A , A =get_distrib(node.right )
A =1 - left_distrib_excess
A =1 - right_distrib_excess
A =(
left_distrib_moves
+ right_distrib_moves
+ abs(a_ )
+ abs(a_ )
)
A =node.data - coins_to_left - coins_to_right
return CoinsDistribResult(a_ , a_ )
return get_distrib(a_ )[0]
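# Worked example for the recursion above: a root holding 3 coins with two leaf
# children holding 0 coins each needs 2 moves. Each leaf reports excess 0, so
# coins_to_left and coins_to_right are both 1 - 0 = 1 at the root, and
# moves = 0 + 0 + |1| + |1| = 2 (one coin travels down each edge).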
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__a = logging.get_logger(__name__)
# General docstring
__a = 'RegNetConfig'
# Base docstring
__a = 'facebook/regnet-y-040'
__a = [1, 1_0_8_8, 7, 7]
# Image classification docstring
__a = 'facebook/regnet-y-040'
__a = 'tabby, tabby cat'
__a = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , snake_case__ : int , snake_case__ : int = 3 , snake_case__ : int = 1 , snake_case__ : int = 1 , snake_case__ : Optional[str] = "relu" , **snake_case__ : str , ):
"""simple docstring"""
super().__init__(**__A )
        # The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A =tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A =tf.keras.layers.ConvaD(
filters=__A , kernel_size=__A , strides=__A , padding="VALID" , groups=__A , use_bias=__A , name="convolution" , )
A =tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
A =ACTaFN[activation] if activation is not None else tf.identity
def _a ( self : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
A =self.convolution(self.padding(__A ) )
A =self.normalization(__A )
A =self.activation(__A )
return hidden_state
class UpperCamelCase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Tuple , snake_case__ : RegNetConfig , **snake_case__ : List[Any] ):
"""simple docstring"""
super().__init__(**__A )
A =config.num_channels
A =TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def _a ( self : Dict , snake_case__ : str ):
"""simple docstring"""
A =shape_list(__A )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A =tf.transpose(__A , perm=(0, 2, 3, 1) )
A =self.embedder(__A )
return hidden_state
class UpperCamelCase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case__ : int , snake_case__ : int = 2 , **snake_case__ : List[str] ):
"""simple docstring"""
super().__init__(**__A )
A =tf.keras.layers.ConvaD(
filters=__A , kernel_size=1 , strides=__A , use_bias=__A , name="convolution" )
A =tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def _a ( self : str , snake_case__ : tf.Tensor , snake_case__ : bool = False ):
"""simple docstring"""
return self.normalization(self.convolution(__A ) , training=__A )
class UpperCamelCase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case__ : int , snake_case__ : int , **snake_case__ : Optional[Any] ):
"""simple docstring"""
super().__init__(**__A )
A =tf.keras.layers.GlobalAveragePoolingaD(keepdims=__A , name="pooler" )
A =[
tf.keras.layers.ConvaD(filters=__A , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=__A , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def _a ( self : Optional[int] , snake_case__ : int ):
"""simple docstring"""
A =self.pooler(__A )
for layer_module in self.attention:
A =layer_module(__A )
A =hidden_state * pooled
return hidden_state
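# Squeeze-and-excitation recap for the layer above: global average pooling
# squeezes each channel to a single value, two 1x1 convolutions (ReLU then
# sigmoid) turn it into per-channel gates in [0, 1], and the feature map is
# rescaled channel-wise by those gates.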
class UpperCamelCase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 1 , **snake_case__ : str ):
"""simple docstring"""
super().__init__(**__A )
A =in_channels != out_channels or stride != 1
A =max(1 , out_channels // config.groups_width )
A =(
TFRegNetShortCut(__A , stride=__A , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A =[
TFRegNetConvLayer(__A , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__A , stride=__A , groups=__A , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__A , kernel_size=1 , activation=__A , name="layer.2" ),
]
A =ACTaFN[config.hidden_act]
def _a ( self : Tuple , snake_case__ : Optional[int] ):
"""simple docstring"""
A =hidden_state
for layer_module in self.layers:
A =layer_module(__A )
A =self.shortcut(__A )
hidden_state += residual
A =self.activation(__A )
return hidden_state
class UpperCamelCase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 1 , **snake_case__ : List[str] ):
"""simple docstring"""
super().__init__(**__A )
A =in_channels != out_channels or stride != 1
A =max(1 , out_channels // config.groups_width )
A =(
TFRegNetShortCut(__A , stride=__A , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
A =[
TFRegNetConvLayer(__A , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__A , stride=__A , groups=__A , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__A , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__A , kernel_size=1 , activation=__A , name="layer.3" ),
]
A =ACTaFN[config.hidden_act]
def _a ( self : str , snake_case__ : str ):
"""simple docstring"""
A =hidden_state
for layer_module in self.layers:
A =layer_module(__A )
A =self.shortcut(__A )
hidden_state += residual
A =self.activation(__A )
return hidden_state
class UpperCamelCase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 2 , snake_case__ : int = 2 , **snake_case__ : Dict ):
"""simple docstring"""
super().__init__(**__A )
A =TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
A =[
# downsampling is done in the first layer with stride of 2
layer(__A , __A , __A , stride=__A , name="layers.0" ),
*[layer(__A , __A , __A , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _a ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
for layer_module in self.layers:
A =layer_module(__A )
return hidden_state
class UpperCamelCase__( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , snake_case__ : RegNetConfig , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__A )
A =[]
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
A =zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__A , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__A , __A , __A , depth=__A , name=f'''stages.{i+1}''' ) )
def _a ( self : Optional[int] , snake_case__ : tf.Tensor , snake_case__ : bool = False , snake_case__ : bool = True ):
"""simple docstring"""
A =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A =hidden_states + (hidden_state,)
A =stage_module(__A )
if output_hidden_states:
A =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A )
@keras_serializable
class UpperCamelCase__( tf.keras.layers.Layer ):
"""simple docstring"""
_A = RegNetConfig
def __init__( self : Dict , snake_case__ : List[str] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__A )
A =config
A =TFRegNetEmbeddings(__A , name="embedder" )
A =TFRegNetEncoder(__A , name="encoder" )
A =tf.keras.layers.GlobalAveragePoolingaD(keepdims=__A , name="pooler" )
@unpack_inputs
def _a ( self : Union[str, Any] , snake_case__ : tf.Tensor , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , ):
"""simple docstring"""
A =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A =return_dict if return_dict is not None else self.config.use_return_dict
A =self.embedder(__A , training=__A )
A =self.encoder(
__A , output_hidden_states=__A , return_dict=__A , training=__A )
A =encoder_outputs[0]
A =self.pooler(__A )
        # Change to NCHW output format to have uniformity in the modules
A =tf.transpose(__A , perm=(0, 3, 1, 2) )
A =tf.transpose(__A , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A =tuple([tf.transpose(__A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__A , pooler_output=__A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
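# Layout dance used above: inputs arrive as NCHW for API parity with the
# PyTorch model, are transposed to NHWC (the only layout Keras Conv2D supports
# on CPU), and every returned hidden state is transposed back to NCHW.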
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = RegNetConfig
_A = "regnet"
_A = "pixel_values"
@property
def _a ( self : List[str] ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
__a = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
__a = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase__ , )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , snake_case__ : RegNetConfig , *snake_case__ : int , **snake_case__ : Optional[Any] ):
"""simple docstring"""
super().__init__(__A , *__A , **__A )
A =TFRegNetMainLayer(__A , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a ( self : str , snake_case__ : tf.Tensor , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int]=False , ):
"""simple docstring"""
A =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A =return_dict if return_dict is not None else self.config.use_return_dict
A =self.regnet(
pixel_values=__A , output_hidden_states=__A , return_dict=__A , training=__A , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase__ , )
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case__ : RegNetConfig , *snake_case__ : Optional[int] , **snake_case__ : Tuple ):
"""simple docstring"""
super().__init__(__A , *__A , **__A )
A =config.num_labels
A =TFRegNetMainLayer(__A , name="regnet" )
# classification head
A =[
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a ( self : Optional[Any] , snake_case__ : tf.Tensor = None , snake_case__ : tf.Tensor = None , snake_case__ : bool = None , snake_case__ : bool = None , snake_case__ : Union[str, Any]=False , ):
"""simple docstring"""
A =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A =return_dict if return_dict is not None else self.config.use_return_dict
A =self.regnet(
__A , output_hidden_states=__A , return_dict=__A , training=__A )
A =outputs.pooler_output if return_dict else outputs[1]
A =self.classifier[0](__A )
A =self.classifier[1](__A )
A =None if labels is None else self.hf_compute_loss(labels=__A , logits=__A )
if not return_dict:
A =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
| 711 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ):
"""simple docstring"""
A =vocab
A =unk_token
A =max_input_chars_per_word
def _a ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
A =list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
A =0
A =[]
while start < len(snake_case__ ):
A =len(snake_case__ )
A =None
while start < end:
A ="".join(chars[start:end] )
if substr in self.vocab:
A =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(snake_case__ )
A =end
return sub_tokens
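# A quick trace of the greedy longest-match-first loop above, with a
# hypothetical vocabulary: given vocab = {"foo": 0, "foob": 1, "ar": 2},
# tokenizing "foobar" first consumes the longest matching prefix "foob",
# then "ar", yielding ["foob", "ar"]; a span with no matching prefix
# contributes unk_token instead.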
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ["input_ids", "attention_mask"]
_A = False
def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ):
"""simple docstring"""
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
A =bod_token
A =eod_token
A =load_vocab(snake_case__ )
A =self.encoder[space_token]
A =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
A ={v: k for k, v in self.encoder.items()}
A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _a ( self : Dict ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def _a ( self : Any ):
"""simple docstring"""
return self.encoder["\n"]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : Tuple , snake_case__ : int ):
"""simple docstring"""
A =[]
for x in jieba.cut(snake_case__ , cut_all=snake_case__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
return output_tokens
def _a ( self : List[Any] , snake_case__ : List[Any] , **snake_case__ : str ):
"""simple docstring"""
A =[i for i in token_ids if i >= 0]
A =[
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(snake_case__ , **snake_case__ )
def _a ( self : List[Any] , snake_case__ : int ):
"""simple docstring"""
return token in self.encoder
def _a ( self : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
return "".join(snake_case__ )
def _a ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Dict , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if os.path.isdir(snake_case__ ):
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
A =(filename_prefix + "-" if filename_prefix else "") + save_directory
A =0
if " " in self.encoder:
A =self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
A =self.encoder["\n"]
del self.encoder["\n"]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
A =token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ ))
| 689 | 0 |
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
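# Sanity check: the problem statement (Project Euler 191) lists exactly 43
# prize strings over a 4-day period.
assert solution(4) == 43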
if __name__ == "__main__":
print(solution())
| 712 |
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n (trial division)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
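# Worked example from the problem statement (Project Euler 3): the prime
# factors of 13195 are 5, 7, 13 and 29, so the largest is 29.
assert solution(13195) == 29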
if __name__ == "__main__":
print(F'''{solution() = }''')
| 689 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
A =[]
for k in state_dict.keys():
A =k
if ".pwconv" in k:
A =k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
A =k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
A =k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
A =k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
A =k_new.split("." )
if ls[2].isdigit():
A ="swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
A =k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
A =SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
A =1000
A ="huggingface/label-files"
A ="imagenet-1k-id2label.json"
A =json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="dataset" ) , "r" ) )
A ={int(__UpperCAmelCase ): v for k, v in idalabel.items()}
A =idalabel
A ={v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
A =[3, 3, 6, 4]
A =[48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
A =[3, 3, 9, 6]
A =[48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
A =[4, 3, 10, 5]
A =[48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
A =[4, 4, 12, 6]
A =[64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
A =torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location="cpu" , check_hash=__UpperCAmelCase )
else:
A =torch.load(__UpperCAmelCase , map_location="cpu" )
A =checkpoint
A =create_rename_keys(__UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# load HuggingFace model
A =SwiftFormerForImageClassification(__UpperCAmelCase ).eval()
hf_model.load_state_dict(__UpperCAmelCase )
# prepare test inputs
A =prepare_img()
A =ViTImageProcessor.from_pretrained("preprocessor_config" )
A =processor(images=__UpperCAmelCase , return_tensors="pt" )
# compare outputs from both models
A =get_expected_output(__UpperCAmelCase )
A =hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , __UpperCAmelCase , atol=1E-3 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
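    # Example invocation (the script filename and checkpoint path are hypothetical):
    #   python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
    #       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth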
| 713 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "Wav2Vec2FeatureExtractor"
_A = "AutoTokenizer"
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
A =self.feature_extractor
A =False
@classmethod
def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ):
"""simple docstring"""
try:
return super().from_pretrained(snake_case__ , **snake_case__ )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , snake_case__ , )
A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ )
A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ )
return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ )
def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*snake_case__ , **snake_case__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
A =kwargs.pop("raw_speech" )
else:
A =kwargs.pop("audio" , snake_case__ )
A =kwargs.pop("sampling_rate" , snake_case__ )
A =kwargs.pop("text" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
if text is not None:
A =self.tokenizer(snake_case__ , **snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A =encodings["input_ids"]
return inputs
def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case__ , **snake_case__ )
A =kwargs.pop("input_features" , snake_case__ )
A =kwargs.pop("labels" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if input_features is not None:
A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
if labels is not None:
A =self.tokenizer.pad(snake_case__ , **snake_case__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A =labels["input_ids"]
return input_features
def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@contextmanager
def _a ( self : int ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
A =True
A =self.tokenizer
yield
A =self.feature_extractor
A =False
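# Minimal usage sketch of the processor defined above (exposed in transformers
# as Wav2Vec2Processor; the checkpoint name is a real public one, the audio
# and transcript variables are placeholders):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   batch = processor(audio=raw_audio, sampling_rate=16_000, text=transcript)
#   # the feature extractor fills the audio features; the tokenizer's output
#   # ids are attached as batch["labels"]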
| 689 | 0 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
A =x.device
A =(x * 255).int().clamp(0 , 255 )
A =2 ** torch.arange(bits - 1 , -1 , -1 , device=lowerCAmelCase__ )
A =rearrange(lowerCAmelCase__ , "d -> d 1 1" )
A =rearrange(lowerCAmelCase__ , "b c h w -> b c 1 h w" )
A =((x & mask) != 0).float()
A =rearrange(lowerCAmelCase__ , "b c d h w -> b (c d) h w" )
A =bits * 2 - 1
return bits
def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
A =x.device
A =(x > 0).int()
A =2 ** torch.arange(bits - 1 , -1 , -1 , device=lowerCAmelCase__ , dtype=torch.intaa )
A =rearrange(lowerCAmelCase__ , "d -> d 1 1" )
A =rearrange(lowerCAmelCase__ , "b (c d) h w -> b c d h w" , d=8 )
A =reduce(x * mask , "b c d h w -> b c h w" , "sum" )
return (dec / 255).clamp(0.0 , 1.0 )
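# Round-trip sketch for the two helpers above (the batch shape is an
# arbitrary example): decimal_to_bits maps (b, c, h, w) pixels in [0, 1] to
# (b, c * BITS, h, w) bit planes in {-1, 1}, and bits_to_decimal inverts it
# up to 8-bit quantisation:
#   x = torch.rand(2, 3, 8, 8)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255)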
def ddim_bit_scheduler_step(self, model_output, timestep, sample, eta=0.0, use_clipped_model_output=True, generator=None, return_dict=True) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
A =timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
A =self.alphas_cumprod[timestep]
A =self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
A =1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
A =self.bit_scale
if self.config.clip_sample:
A =torch.clamp(lowerCAmelCase__ , -scale , lowerCAmelCase__ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
A =self._get_variance(lowerCAmelCase__ , lowerCAmelCase__ )
A =eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
A =(sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A =(1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A =alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
A =model_output.device if torch.is_tensor(lowerCAmelCase__ ) else "cpu"
A =torch.randn(model_output.shape , dtype=model_output.dtype , generator=lowerCAmelCase__ ).to(lowerCAmelCase__ )
A =self._get_variance(lowerCAmelCase__ , lowerCAmelCase__ ) ** 0.5 * eta * noise
A =prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
def UpperCamelCase_ ( self , a_ , a_ , a_ , a_="epsilon" , a_=None , a_ = True , ) ->str:
A =timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
A , A =torch.split(lowerCAmelCase__ , sample.shape[1] , dim=1 )
else:
A =None
# 1. compute alphas, betas
A =self.alphas_cumprod[t]
A =self.alphas_cumprod[t - 1] if t > 0 else self.one
A =1 - alpha_prod_t
A =1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
A =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
A =model_output
else:
raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
A =self.bit_scale
if self.config.clip_sample:
A =torch.clamp(lowerCAmelCase__ , -scale , lowerCAmelCase__ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A =(alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
A =self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A =pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A =0
if t > 0:
A =torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowerCAmelCase__ ).to(model_output.device )
A =(self._get_variance(lowerCAmelCase__ , predicted_variance=lowerCAmelCase__ ) ** 0.5) * noise
A =pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case__ : UNetaDConditionModel , snake_case__ : Union[DDIMScheduler, DDPMScheduler] , snake_case__ : Optional[float] = 1.0 , ):
"""simple docstring"""
super().__init__()
A =bit_scale
A =(
ddim_bit_scheduler_step if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self : Dict , snake_case__ : Optional[int] = 2_56 , snake_case__ : Optional[int] = 2_56 , snake_case__ : Optional[int] = 50 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[int] = 1 , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , **snake_case__ : List[Any] , ):
"""simple docstring"""
A =torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=_UpperCAmelCase , )
A =decimal_to_bits(_UpperCAmelCase ) * self.bit_scale
A =latents.to(self.device )
self.scheduler.set_timesteps(_UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
A =self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
A =self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
A =bits_to_decimal(_UpperCAmelCase )
if output_type == "pil":
A =self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
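# Minimal usage sketch for the pipeline class above (BitDiffusion in the
# original diffusers community example; unet construction is elided). Any
# DDIM or DDPM scheduler instance works, since __init__ picks the matching
# patched step function:
#   pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]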
| 714 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 0 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__a = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f'''Invalid type requested: {input_type}''')
    return inputs
def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f'''Invalid output: {output}''')
    return output_types
@is_tool_test
class UpperCamelCase__:
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
A =self.tool.inputs
for _input in inputs:
if isinstance(_input , _UpperCamelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
A =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _a ( self : str ):
"""simple docstring"""
A =create_inputs(self.tool.inputs )
A =self.tool(*_UpperCamelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
A =[outputs]
self.assertListEqual(output_types(_UpperCamelCase ) , self.tool.outputs )
def _a ( self : int ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def _a ( self : List[str] ):
"""simple docstring"""
A =create_inputs(self.tool.inputs )
A =self.tool(*_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
A =[outputs]
self.assertEqual(len(_UpperCamelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(_UpperCamelCase , self.tool.outputs ):
A =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase ) )
def _a ( self : Dict ):
"""simple docstring"""
A =create_inputs(self.tool.inputs )
A =[]
for _input, input_type in zip(_UpperCamelCase , self.tool.inputs ):
if isinstance(_UpperCamelCase , _UpperCamelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
A =self.tool(*_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
A =[outputs]
self.assertEqual(len(_UpperCamelCase ) , len(self.tool.outputs ) )
| 715 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 689 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[str]=3 , snake_case__ : List[str]=32 , snake_case__ : List[str]=3 , snake_case__ : Any=10 , snake_case__ : Optional[Any]=[10, 20, 30, 40] , snake_case__ : List[Any]=[1, 1, 2, 1] , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=True , snake_case__ : List[str]="relu" , snake_case__ : List[str]=3 , snake_case__ : Union[str, Any]=None , ):
"""simple docstring"""
A =parent
A =batch_size
A =image_size
A =num_channels
A =embeddings_size
A =hidden_sizes
A =depths
A =is_training
A =use_labels
A =hidden_act
A =num_labels
A =scope
A =len(_A )
def _a ( self : Tuple ):
"""simple docstring"""
A =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A =None
if self.use_labels:
A =ids_tensor([self.batch_size] , self.num_labels )
A =self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _a ( self : Any , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Any ):
"""simple docstring"""
A =TFRegNetModel(config=_A )
A =model(_A , training=_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : int , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[Any] ):
"""simple docstring"""
A =self.num_labels
A =TFRegNetForImageClassification(_A )
A =model(_A , labels=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : int ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase__( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
_A = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_A = (
{"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
_A = False
def _a ( self : Tuple ):
"""simple docstring"""
A =TFRegNetModelTester(self )
A =ConfigTester(self , config_class=_A , has_text_modality=_A )
def _a ( self : int ):
"""simple docstring"""
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _a ( self : int ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : Optional[int] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A =model_class(_A )
A =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A =[*signature.parameters.keys()]
A =['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def _a ( self : int ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : List[Any] ):
A =model_class(_A )
A =model(**self._prepare_for_class(_A , _A ) , training=_A )
A =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A =self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A =self.model_tester.prepare_config_and_inputs_for_common()
A =['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A =layer_type
A =True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A =True
check_hidden_states_output(_A , _A , _A )
def _a ( self : Optional[int] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : List[Any]={} ):
A =model(_A , return_dict=_A , **_A )
A =model(_A , return_dict=_A , **_A ).to_tuple()
def recursive_check(snake_case__ : Tuple , snake_case__ : int ):
if isinstance(_A , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_A , _A ):
recursive_check(_A , _A )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_A , _A ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
f''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(_A , _A )
for model_class in self.all_model_classes:
A =model_class(_A )
A =self._prepare_for_class(_A , _A )
A =self._prepare_for_class(_A , _A )
check_equivalence(_A , _A , _A )
A =self._prepare_for_class(_A , _A , return_labels=_A )
A =self._prepare_for_class(_A , _A , return_labels=_A )
check_equivalence(_A , _A , _A )
A =self._prepare_for_class(_A , _A )
A =self._prepare_for_class(_A , _A )
check_equivalence(_A , _A , _A , {"output_hidden_states": True} )
A =self._prepare_for_class(_A , _A , return_labels=_A )
A =self._prepare_for_class(_A , _A , return_labels=_A )
check_equivalence(_A , _A , _A , {"output_hidden_states": True} )
def _a ( self : Optional[int] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A =TFRegNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A =TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A =self.default_image_processor
A =prepare_img()
A =image_processor(images=_A , return_tensors="tf" )
# forward pass
A =model(**_A , training=_A )
# verify the logits
A =tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , _A )
A =tf.constant([-0.4_180, -1.5_051, -3.4_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
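# The _LazyModule registered above defers the heavy torch/TF imports: e.g.
# `from transformers.models.mobilevit import MobileViTModel` only imports
# modeling_mobilevit on first attribute access.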
| 689 | 0 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 717 |
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 689 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCamelCase__( nn.Module ):
"""simple docstring"""
_A = 4_2
_A = 4_2
_A = 0.0
_A = 1
_A = 1
_A = True
_A = False
_A = False
_A = False
_A = jnp.floataa
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =[]
A =[]
for i in range(self.num_layers ):
A =self.in_channels if i == 0 else self.out_channels
A =FlaxResnetBlockaD(
in_channels=lowerCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
A =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCamelCase_ )
A =resnets
A =attentions
if self.add_downsample:
A =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Tuple=True ):
"""simple docstring"""
A =()
for resnet, attn in zip(self.resnets , self.attentions ):
A =resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
A =attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
A =self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase__( nn.Module ):
"""simple docstring"""
_A = 4_2
_A = 4_2
_A = 0.0
_A = 1
_A = True
_A = jnp.floataa
def _a ( self : List[Any] ):
"""simple docstring"""
A =[]
for i in range(self.num_layers ):
A =self.in_channels if i == 0 else self.out_channels
A =FlaxResnetBlockaD(
in_channels=lowerCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
A =resnets
if self.add_downsample:
A =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str]=True ):
"""simple docstring"""
A =()
for resnet in self.resnets:
A =resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
A =self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase__( nn.Module ):
"""simple docstring"""
_A = 4_2
_A = 4_2
_A = 4_2
_A = 0.0
_A = 1
_A = 1
_A = True
_A = False
_A = False
_A = False
_A = jnp.floataa
def _a ( self : Any ):
"""simple docstring"""
A =[]
A =[]
for i in range(self.num_layers ):
A =self.in_channels if (i == self.num_layers - 1) else self.out_channels
A =self.prev_output_channel if i == 0 else self.out_channels
A =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
A =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCamelCase_ )
A =resnets
A =attentions
if self.add_upsample:
A =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict=True ):
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
A =res_hidden_states_tuple[-1]
A =res_hidden_states_tuple[:-1]
A =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
A =resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
A =attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
if self.add_upsample:
A =self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class UpperCamelCase__( nn.Module ):
"""simple docstring"""
_A = 4_2
_A = 4_2
_A = 4_2
_A = 0.0
_A = 1
_A = True
_A = jnp.floataa
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =[]
for i in range(self.num_layers ):
A =self.in_channels if (i == self.num_layers - 1) else self.out_channels
A =self.prev_output_channel if i == 0 else self.out_channels
A =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
A =resnets
if self.add_upsample:
A =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : int=True ):
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
A =res_hidden_states_tuple[-1]
A =res_hidden_states_tuple[:-1]
A =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
A =resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
if self.add_upsample:
A =self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class UpperCamelCase__( nn.Module ):
"""simple docstring"""
_A = 4_2
_A = 0.0
_A = 1
_A = 1
_A = False
_A = False
_A = jnp.floataa
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
A =[]
for _ in range(self.num_layers ):
A =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCamelCase_ )
A =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCamelCase_ )
A =resnets
A =attentions
def __call__( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : Any=True ):
"""simple docstring"""
A =self.resnets[0](lowerCamelCase_ , lowerCamelCase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
A =attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
A =resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ )
return hidden_states
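# Shape sketch for the blocks above (Flax convolutions use NHWC layout; the
# sizes are arbitrary examples): with add_downsample=True a down block maps
# hidden_states of shape (b, h, w, c_in) to (b, h // 2, w // 2, c_out) and
# stashes each intermediate in output_states; the matching up block pops
# those skips off res_hidden_states_tuple and concatenates them on the
# channel axis before each resnet.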
| 718 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
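# Worked example for the module-level inputs below (n = 3, array = [1, 2, 5],
# target = 5): the bottom-up table fills dp_array = [1, 1, 2, 3, 5, 9], so all
# three implementations count 9 ordered sequences summing to 5.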
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 689 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''', f'''block.{new_block_num}.{layer_num}.{new_name}''')
return key
def rename_keys(state_dict):
A =OrderedDict()
A , A =0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
A =key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
A =key[: key.find("proj" )]
A =key.replace(_A , f'''patch_embeddings.{total_embed_found}.''' )
A =key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
A ="poolformer.encoder." + key
if "mlp.fc1" in key:
A =replace_key_with_offset(_A , _A , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
A =replace_key_with_offset(_A , _A , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
A =replace_key_with_offset(_A , _A , "norm1" , "before_norm" )
if "norm2" in key:
A =replace_key_with_offset(_A , _A , "norm2" , "after_norm" )
if "layer_scale_1" in key:
A =replace_key_with_offset(_A , _A , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
A =replace_key_with_offset(_A , _A , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
A =key.replace("head" , "classifier" )
A =value
return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
A =PoolFormerConfig()
# set attributes based on model_name
A ="huggingface/label-files"
A =model_name[-3:]
A =1000
A ="imagenet-1k-id2label.json"
A =(1, 1000)
# set config attributes
A =json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) )
A ={int(_A ): v for k, v in idalabel.items()}
A =idalabel
A ={v: k for k, v in idalabel.items()}
if size == "s12":
A =[2, 2, 6, 2]
A =[64, 128, 320, 512]
A =4.0
A =0.9
elif size == "s24":
A =[4, 4, 12, 4]
A =[64, 128, 320, 512]
A =4.0
A =0.9
elif size == "s36":
A =[6, 6, 18, 6]
A =[64, 128, 320, 512]
A =4.0
A =1E-6
A =0.9
elif size == "m36":
A =[6, 6, 18, 6]
A =[96, 192, 384, 768]
A =4.0
A =1E-6
A =0.95
elif size == "m48":
A =[8, 8, 24, 8]
A =[96, 192, 384, 768]
A =4.0
A =1E-6
A =0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
A =PoolFormerImageProcessor(crop_pct=_A )
# Prepare image
A =prepare_img()
A =image_processor(images=_A , return_tensors="pt" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
A =torch.load(_A , map_location=torch.device("cpu" ) )
# rename keys
A =rename_keys(_A )
# create HuggingFace model and load state dict
A =PoolFormerForImageClassification(_A )
model.load_state_dict(_A )
model.eval()
# Define image processor
A =PoolFormerImageProcessor(crop_pct=_A )
A =image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
A =model(_A )
A =outputs.logits
# define expected logit slices for different models
if size == "s12":
A =torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A =torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A =torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A =torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A =torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _A , atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_A )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 719 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
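# ucal builds the rising product u * (u - 1) * ... * (u - p + 1) that appears
# in Newton's forward-difference formula,
#   P(x) = y_0 + u * Δy_0 + u(u - 1)/2! * Δ²y_0 + ...,  with u = (x - x_0) / h,
# which main() evaluates term by term below.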
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f'''the value at {value} is {summ}''')
if __name__ == "__main__":
main()
| 689 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
A =flax_key_tuple[:-1] + ("weight",)
A =torch.permute(a_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(a_ ):
# linear layer
A =flax_key_tuple[:-1] + ("weight",)
A =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A =flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def UpperCamelCase_ ( a_ , a_ , a_ ) ->Tuple:
if "metadata" in layer:
A =layer.split("metadata" )
A ="".join(split_layer[0] )[:-1]
A =[tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
A =layer.split("kvstore" )
A ="".join(split_layer[0] )[:-1]
A =[tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
A =layer.split("/" )
A ="/".join(split_layer[:-1] )
A =(split_layer[-1],)
if "kvstore/path" in layer:
A =f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
A ="file"
else:
A =checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def UpperCamelCase_ ( a_ , a_ ) ->List[str]:
A =rename_keys(a_ )
A ={}
for k, v in current_block.items():
A =v
A =new_current_block
torch.save(a_ , a_ )
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , a_ = WEIGHTS_NAME ) ->Union[str, Any]:
A =convert_file_size_to_int(a_ )
A =[]
A ={}
A =0
A =0
os.makedirs(a_ , exist_ok=a_ )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
A =serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
A =flatten_dict(a_ , sep="/" )
A ={}
for layer in checkpoint_info.keys():
A , A , A =get_key_and_tensorstore_dict(
a_ , a_ , a_ )
if curr_real_layer_name in all_layers:
A =content
else:
A ={split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A =ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A =torch.tensor(a_ )
A =raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A , A =rename_base_flax_keys(tuple(key.split("/" ) ) , a_ )
A ="/".join(a_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
A =os.path.join(
a_ , weights_name.replace(".bin" , f'''-{len(a_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(a_ , a_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
A ={}
A =0
A =raw_weights.to(getattr(a_ , a_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A =os.path.join(a_ , weights_name.replace(".bin" , f'''-{len(a_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(a_ , a_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(a_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A ={}
A ={}
for idx, shard in enumerate(a_ ):
A =weights_name.replace(
".bin" , f'''-{idx+1:05d}-of-{len(a_ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
A =os.path.join(a_ , weights_name.replace(".bin" , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(a_ , os.path.join(a_ , a_ ) )
A =shard
for key in shard:
A =shard_file
# Add the metadata
A ={"total_size": total_size}
A ={"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(a_ , a_ ) , "w" , encoding="utf-8" ) as f:
A =json.dumps(a_ , indent=2 , sort_keys=a_ ) + "\n"
f.write(a_ )
return metadata, index
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
__a = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def UpperCamelCase_ ( ) ->Union[str, Any]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
A =SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
A =SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
A =TaTokenizer.from_pretrained("t5-small" )
A ="A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
A =tokenizer(a_ , return_tensors="pt" ).input_ids
A =model.generate(a_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
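# Editor's note: a minimal sketch of the shard-size bookkeeping used by the script above --
# accumulate per-tensor byte counts and start a new shard once a budget is exceeded.
# The shapes and the 10-byte budget are toy values; the real script uses dtype_byte_size
# and a "10GB" default.
import torch

def plan_shards(tensors: dict, max_bytes: int) -> list:
    shards, current, current_bytes = [], {}, 0
    for name, t in tensors.items():
        size = t.numel() * t.element_size()  # bytes occupied by this tensor
        if current and current_bytes + size > max_bytes:
            shards.append(current)  # close the shard before it overflows
            current, current_bytes = {}, 0
        current[name] = t
        current_bytes += size
    if current:
        shards.append(current)
    return shards

weights = {f"layer_{i}.weight": torch.zeros(3, dtype=torch.int8) for i in range(4)}
# -> [['layer_0.weight', 'layer_1.weight', 'layer_2.weight'], ['layer_3.weight']]
print([list(s) for s in plan_shards(weights, max_bytes=10)])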
| 720 |
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCamelCase_ ( a_ ) ->Any:
# getting number of pixels in the image
A , A =img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(a_ ):
for j in range(a_ ):
A =[255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__a = imread("""image_data/lena.jpg""", 1)
# convert to its negative
__a = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
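# Editor's note: the per-pixel loop above is O(H*W) in interpreted Python; with numpy the
# same negative can be computed in one vectorized expression. A sketch on a synthetic
# image (no file I/O assumed).
import numpy as np

img = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)  # stand-in for imread output
negative = 255 - img  # each channel value v becomes 255 - v
assert (negative + img == 255).all()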
| 689 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__a = None
__a = logging.get_logger(__name__)
__a = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__a = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
__a = {
'''google/bigbird-roberta-base''': 4_0_9_6,
'''google/bigbird-roberta-large''': 4_0_9_6,
'''google/bigbird-base-trivia-itc''': 4_0_9_6,
}
__a = '''▁'''
class UpperCamelCase__( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = BigBirdTokenizer
_A = ["input_ids", "attention_mask"]
_A = []
def __init__( self : Any , snake_case__ : Dict=None , snake_case__ : str=None , snake_case__ : int="<unk>" , snake_case__ : int="<s>" , snake_case__ : Any="</s>" , snake_case__ : List[Any]="<pad>" , snake_case__ : str="[SEP]" , snake_case__ : Optional[Any]="[MASK]" , snake_case__ : List[str]="[CLS]" , **snake_case__ : Optional[int] , ):
"""simple docstring"""
A =AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
A =AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
A =AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
A =AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
A =AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
A =AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A =AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
_lowercase , tokenizer_file=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
A =vocab_file
A =False if not self.vocab_file else True
def _a ( self : int , snake_case__ : Optional[int] , snake_case__ : Optional[int] = None ):
"""simple docstring"""
A =[self.sep_token_id]
A =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : int = None , snake_case__ : Union[str, Any] = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
def _a ( self : int , snake_case__ : Tuple , snake_case__ : int = None ):
"""simple docstring"""
A =[self.sep_token_id]
A =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : int = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A =os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
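# Editor's note: a plain-Python sketch of the special-token layouts implemented above,
# using made-up ids (cls=101, sep=102) rather than the real BigBird vocabulary.
CLS, SEP = [101], [102]

def build_inputs(ids_a, ids_b=None):
    # single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP]
    if ids_b is None:
        return CLS + ids_a + SEP
    return CLS + ids_a + SEP + ids_b + SEP

def token_type_ids(ids_a, ids_b=None):
    # segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP]
    if ids_b is None:
        return [0] * len(CLS + ids_a + SEP)
    return [0] * len(CLS + ids_a + SEP) + [1] * len(ids_b + SEP)

print(build_inputs([7, 8], [9]))    # [101, 7, 8, 102, 9, 102]
print(token_type_ids([7, 8], [9]))  # [0, 0, 0, 0, 1, 1]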
| 721 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
__a = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
__a = {
"""ctrl""": 2_5_6,
}
__a = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def UpperCamelCase_ ( a_ ) ->List[str]:
A =set()
A =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A =char
A =set(a_ )
return pairs
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = CONTROL_CODES
def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ):
"""simple docstring"""
super().__init__(unk_token=snake_case__ , **snake_case__ )
with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
A =json.load(snake_case__ )
A ={v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
A =merges_handle.read().split("\n" )[1:-1]
A =[tuple(merge.split() ) for merge in merges]
A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
A ={}
@property
def _a ( self : str ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : List[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : int , snake_case__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
A =tuple(snake_case__ )
A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
A =get_pairs(snake_case__ )
if not pairs:
return token
while True:
A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
A , A =bigram
A =[]
A =0
while i < len(snake_case__ ):
try:
A =word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A =j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A =tuple(snake_case__ )
A =new_word
if len(snake_case__ ) == 1:
break
else:
A =get_pairs(snake_case__ )
A ="@@ ".join(snake_case__ )
A =word[:-4]
A =word
return word
def _a ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
A =[]
A =re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def _a ( self : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
A =" ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
A =0
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
A =token_index
writer.write(" ".join(snake_case__ ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
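# Editor's note: a toy walk-through of the merge loop above. With two made-up merge
# rules, the word "hugs" is first split into characters (with "</w>" appended to the
# last one, as in the bpe method) and then greedily merged by lowest rank.
def toy_bpe(token, ranks):
    word = list(token[:-1]) + [token[-1] + "</w>"]
    while True:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        candidates = [p for p in pairs if p in ranks]
        if not candidates:
            break
        first, second = min(candidates, key=ranks.get)  # lowest-rank pair merges first
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word

print(toy_bpe("hugs", {("u", "g"): 0, ("ug", "s</w>"): 1}))  # ['h', 'ugs</w>']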
| 689 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__a = True
except ImportError:
__a = False
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase_ ( a_ ) ->Tuple:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class UpperCamelCase__( _lowercase ):
"""simple docstring"""
@staticmethod
def _a ( snake_case__ : ArgumentParser ):
"""simple docstring"""
A =parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" , type=A_ , help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" , type=A_ , help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self : Tuple , snake_case__ : bool , snake_case__ : str , snake_case__ : Tuple=None , *snake_case__ : List[str] ):
"""simple docstring"""
A =testing
A =testing_file
A =path
def _a ( self : int ):
"""simple docstring"""
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A =[directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
A =(
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A =path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file , "r" ) as configuration_file:
A =json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=A_ , extra_context=A_ , )
A =[directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" , "r" ) as configuration_file:
A =json.load(A_ )
A =configuration["lowercase_modelname"]
A =configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'''{directory}/configuration.json''' )
A ="PyTorch" in generate_tensorflow_pytorch_and_flax
A ="TensorFlow" in generate_tensorflow_pytorch_and_flax
A ="Flax" in generate_tensorflow_pytorch_and_flax
A =f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(A_ , exist_ok=A_ )
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , "w" ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(snake_case__ : int ):
with open(A_ , "r" ) as f:
A =f.readlines()
with open(A_ , "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(snake_case__ : str , snake_case__ : str , snake_case__ : List[str] ):
# Create temp file
A , A =mkstemp()
A =False
with fdopen(A_ , "w" ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
A =True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(A_ , A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ , A_ )
def skip_units(snake_case__ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(snake_case__ : Tuple ):
with open(A_ ) as datafile:
A =[]
A =False
A =False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A =line.split("\"" )[1]
A =skip_units(A_ )
elif "# Below: " in line and "##" not in line:
A =line.split("\"" )[1]
A =skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ , A_ , A_ )
A =[]
elif "# Replace with" in line and "##" not in line:
A =[]
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(A_ )
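# Editor's note: a minimal sketch of the "copy lines below a marker" patching idiom used
# by the nested `replace` helper above, operating on in-memory strings instead of temp
# files. The marker text is made up.
def insert_below_marker(text: str, marker: str, new_lines: list) -> str:
    out = []
    found = False
    for line in text.splitlines(keepends=True):
        out.append(line)
        if marker in line:
            found = True
            out.extend(l if l.endswith("\n") else l + "\n" for l in new_lines)
    if not found:
        raise ValueError(f"Line {marker} was not found in file.")
    return "".join(out)

print(insert_below_marker("a\n# anchor\nb\n", "# anchor", ["inserted"]))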
| 700 |
def UpperCamelCase_ ( a_ , a_ ) ->list[int]:
A =int(a_ )
# Initialize Result
A =[]
# Traverse through all denomination
for denomination in reversed(a_ ):
# Find denominations
while int(a_ ) >= int(a_ ):
total_value -= int(a_ )
answer.append(a_ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__a = []
__a = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
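# Editor's note: the greedy strategy above is optimal for canonical coin systems such as
# the Indian denominations listed, but can fail on arbitrary ones. A self-contained
# counterexample with a made-up system {1, 3, 4}:
def greedy_change(denominations, total):
    picked = []
    for coin in sorted(denominations, reverse=True):
        while total >= coin:
            total -= coin
            picked.append(coin)
    return picked

print(greedy_change([1, 3, 4], 6))  # [4, 1, 1] -- three coins, but [3, 3] uses only two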
| 689 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__a = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCamelCase__:
"""simple docstring"""
_A = PegasusConfig
_A = {}
_A = 'gelu'
def __init__( self : Optional[Any] , snake_case__ : int , snake_case__ : List[str]=13 , snake_case__ : Union[str, Any]=7 , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=99 , snake_case__ : Optional[int]=32 , snake_case__ : Tuple=5 , snake_case__ : Any=4 , snake_case__ : Optional[Any]=37 , snake_case__ : Dict=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[int]=20 , snake_case__ : Tuple=2 , snake_case__ : Dict=1 , snake_case__ : Optional[int]=0 , ):
"""simple docstring"""
A =parent
A =batch_size
A =seq_length
A =is_training
A =use_labels
A =vocab_size
A =hidden_size
A =num_hidden_layers
A =num_attention_heads
A =intermediate_size
A =hidden_dropout_prob
A =attention_probs_dropout_prob
A =max_position_embeddings
A =eos_token_id
A =pad_token_id
A =bos_token_id
def _a ( self : Tuple ):
"""simple docstring"""
A =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
A =np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
A =np.concatenate([input_ids, eos_tensor] , axis=1 )
A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A =prepare_pegasus_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
A =20
A =model_class_name(snake_case__ )
A =model.encode(inputs_dict["input_ids"] )
A , A =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
A =model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ )
A =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
A =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A =model.decode(
decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , )
A =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
A =model.decode(
decoder_input_ids[:, -1:] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=snake_case__ , )
A =model.decode(snake_case__ , snake_case__ )
A =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def _a ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] ):
"""simple docstring"""
A =20
A =model_class_name(snake_case__ )
A =model.encode(inputs_dict["input_ids"] )
A , A =(
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
A =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
A =model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ )
A =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A =model.decode(
decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , )
A =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
A =model.decode(
decoder_input_ids[:, -1:] , snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=snake_case__ , decoder_position_ids=snake_case__ , )
A =model.decode(snake_case__ , snake_case__ , decoder_attention_mask=snake_case__ )
A =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def UpperCamelCase_ ( a_ , a_ , a_ , a_=None , a_=None , ) ->List[Any]:
if attention_mask is None:
        A =np.not_equal(a_ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
A =np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCamelCase__( lowercase__ , unittest.TestCase ):
"""simple docstring"""
_A = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_A = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_A = True
_A = False
_A = False
_A = False
def _a ( self : Dict ):
"""simple docstring"""
A =FlaxPegasusModelTester(self )
A =ConfigTester(self , config_class=snake_case__ )
def _a ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
"""simple docstring"""
A , A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ )
def _a ( self : List[str] ):
"""simple docstring"""
A , A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(snake_case__ , snake_case__ , snake_case__ )
def _a ( self : int ):
"""simple docstring"""
A , A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A =self._prepare_for_class(snake_case__ , snake_case__ )
A =model_class(snake_case__ )
@jax.jit
def encode_jitted(snake_case__ : Optional[int] , snake_case__ : Optional[Any]=None , **snake_case__ : str ):
return model.encode(input_ids=snake_case__ , attention_mask=snake_case__ )
with self.subTest("JIT Enabled" ):
A =encode_jitted(**snake_case__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
A =encode_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A , A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A =model_class(snake_case__ )
A =model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
A ={
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
return model.decode(
decoder_input_ids=snake_case__ , decoder_attention_mask=snake_case__ , encoder_outputs=snake_case__ , )
with self.subTest("JIT Enabled" ):
A =decode_jitted(**snake_case__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
A =decode_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
A =model_class_name.from_pretrained("google/pegasus-large" , from_pt=snake_case__ )
A =np.ones((1, 1) )
A =model(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A =FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
A =PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
A =[
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
A =[
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
A =tokenizer(snake_case__ , return_tensors="np" , truncation=snake_case__ , max_length=5_12 , padding=snake_case__ )
A =model.generate(**snake_case__ , num_beams=2 ).sequences
A =tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
assert tgt_text == decoded
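# Editor's note: the cache tests above assert that token-by-token decoding with
# past_key_values reproduces a full forward pass. The acceptance criterion is just a
# max-abs-difference check; a sketch of that comparison on dummy arrays:
import numpy as np

full_pass = np.array([[0.10, 0.20, 0.30]])       # logits from decoding all tokens at once
incremental = np.array([[0.10, 0.20, 0.30001]])  # logits from cached, step-wise decoding
diff = np.max(np.abs(incremental - full_pass))
assert diff < 1e-3, f"Max diff is {diff}"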
| 701 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = ["""model.decoder.embed_positions.weights"""]
def UpperCamelCase_ ( a_ ) ->List[str]:
if "emb" in name:
A =name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
A =name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
A =name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
A =name.replace("linear1" , "fc1" )
if "linear2" in name:
A =name.replace("linear2" , "fc2" )
if "norm1" in name:
A =name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
A =name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
A =name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
A =name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
A =name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]:
A =list(state_dict.keys() )
A ={}
for key in keys:
A =state_dict.pop(a_ )
A =rename_keys(a_ )
if "in_proj_weight" in key:
# split fused qkv proj
A =val[:hidden_size, :]
A =val[hidden_size : 2 * hidden_size, :]
A =val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A =val
else:
A =val
return state_dict, enc_dec_proj_state_dict
def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
A =1024
A =24
A =16
elif checkpoint == "medium":
A =1536
A =48
A =24
elif checkpoint == "large":
A =2048
A =48
A =32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
A =MusicgenDecoderConfig(
hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , )
return config
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]:
A =MusicGen.get_pretrained(a_ , device=a_ )
A =decoder_config_from_checkpoint(a_ )
A =fairseq_model.lm.state_dict()
A , A =rename_state_dict(
a_ , hidden_size=decoder_config.hidden_size )
A =TaEncoderModel.from_pretrained("t5-base" )
A =EncodecModel.from_pretrained("facebook/encodec_32khz" )
A =MusicgenForCausalLM(a_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A , A =decoder.load_state_dict(a_ , strict=a_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(a_ )
if len(a_ ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(a_ ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(a_ )
# check we can do a forward pass
A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A =input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A =model(input_ids=a_ , decoder_input_ids=a_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
A =AutoTokenizer.from_pretrained("t5-base" )
A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ )
# set the appropriate bos/pad token ids
A =2048
A =2048
# set other default generation config params
A =int(30 * audio_encoder.config.frame_rate )
A =True
A =3.0
if pytorch_dump_folder is not None:
Path(a_ ).mkdir(exist_ok=a_ )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(a_ )
processor.push_to_hub(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
__a = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
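# Editor's note: a sketch of the fused-projection split performed in rename_state_dict
# above -- a single in_proj weight of shape (3*hidden, hidden) becomes separate q/k/v
# matrices. Toy hidden size of 2.
import torch

hidden_size = 2
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
q = in_proj_weight[:hidden_size, :]                   # first block of rows
k = in_proj_weight[hidden_size : 2 * hidden_size, :]  # middle block
v = in_proj_weight[-hidden_size:, :]                  # last block
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)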
| 689 | 0 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , a_ ) ->Optional[Any]:
A =StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
A =load_file(a_ )
A =[]
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
A =key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" )
A =pipeline.text_encoder
else:
A =key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" )
A =pipeline.unet
# find the target layer
A =layer_infos.pop(0 )
while len(a_ ) > -1:
try:
A =curr_layer.__getattr__(a_ )
if len(a_ ) > 0:
A =layer_infos.pop(0 )
elif len(a_ ) == 0:
break
except Exception:
if len(a_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
A =layer_infos.pop(0 )
A =[]
if "lora_down" in key:
pair_keys.append(key.replace("lora_down" , "lora_up" ) )
pair_keys.append(a_ )
else:
pair_keys.append(a_ )
pair_keys.append(key.replace("lora_up" , "lora_down" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
A =state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
A =state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ ).unsqueeze(2 ).unsqueeze(3 )
else:
A =state_dict[pair_keys[0]].to(torch.floataa )
A =state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ )
# update visited list
for item in pair_keys:
visited.append(a_ )
return pipeline
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
__a = parser.parse_args()
__a = args.base_model_path
__a = args.checkpoint_path
__a = args.dump_path
__a = args.lora_prefix_unet
__a = args.lora_prefix_text_encoder
__a = args.alpha
__a = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__a = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
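# Editor's note: the weight update applied above is W <- W + alpha * (up @ down). A
# numpy sketch with made-up shapes (rank-2 LoRA on a 4x4 layer):
import numpy as np

rng = np.random.default_rng(0)
W = rng.standard_normal((4, 4))
lora_up, lora_down = rng.standard_normal((4, 2)), rng.standard_normal((2, 4))
alpha = 0.75
W_merged = W + alpha * (lora_up @ lora_down)  # same rank-r correction as torch.mm above
assert W_merged.shape == W.shape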
| 702 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_ ( a_ ) ->Tuple:
A =FileLock(str(tmpdir / "foo.lock" ) )
A =FileLock(str(tmpdir / "foo.lock" ) )
A =0.01
with locka.acquire():
with pytest.raises(a_ ):
A =time.time()
locka.acquire(a_ )
assert time.time() - _start > timeout
def UpperCamelCase_ ( a_ ) ->List[Any]:
A ="a" * 1000 + ".lock"
A =FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(a_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A =FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(a_ ):
locka.acquire(0 )
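# Editor's note: a minimal usage sketch of the behaviour the tests above assert -- a
# second FileLock on the same path cannot be acquired while the first is held, and a
# short timeout raises Timeout. The path below is illustrative.
from datasets.utils.filelock import FileLock, Timeout

lock_a = FileLock("/tmp/example.lock")
lock_b = FileLock("/tmp/example.lock")
with lock_a.acquire():
    try:
        lock_b.acquire(timeout=0.01)  # the lock is already held by lock_a
    except Timeout:
        print("second acquire timed out, as the tests above expect")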
| 689 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase__( UpperCamelCase_ ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(__A , "neck_hidden_sizes" ) )
self.parent.assertTrue(hasattr(__A , "num_attention_heads" ) )
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : Optional[int] , snake_case__ : Dict , snake_case__ : Any=13 , snake_case__ : Optional[int]=32 , snake_case__ : List[Any]=2 , snake_case__ : Dict=3 , snake_case__ : Optional[Any]=6_40 , snake_case__ : Any=4 , snake_case__ : Dict="silu" , snake_case__ : Union[str, Any]=3 , snake_case__ : Tuple=32 , snake_case__ : Tuple=0.1 , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Dict=0.02 , snake_case__ : Any=True , snake_case__ : Optional[int]=True , snake_case__ : str=10 , snake_case__ : List[str]=None , ):
"""simple docstring"""
A =parent
A =batch_size
A =image_size
A =patch_size
A =num_channels
A =last_hidden_size
A =num_attention_heads
A =hidden_act
A =conv_kernel_size
A =output_stride
A =hidden_dropout_prob
A =attention_probs_dropout_prob
A =classifier_dropout_prob
A =use_labels
A =is_training
A =num_labels
A =initializer_range
A =scope
def _a ( self : List[str] ):
"""simple docstring"""
A =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A =None
A =None
if self.use_labels:
A =ids_tensor([self.batch_size] , self.num_labels )
A =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A =self.get_config()
return config, pixel_values, labels, pixel_labels
def _a ( self : Optional[Any] ):
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _a ( self : Any , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
A =MobileViTModel(config=__A )
model.to(__A )
model.eval()
A =model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _a ( self : Optional[Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Any ):
"""simple docstring"""
A =self.num_labels
A =MobileViTForImageClassification(__A )
model.to(__A )
model.eval()
A =model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Optional[Any] ):
"""simple docstring"""
A =self.num_labels
A =MobileViTForSemanticSegmentation(__A )
model.to(__A )
model.eval()
A =model(__A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
A =model(__A , labels=__A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _a ( self : Optional[int] ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
        A , A , A , A =config_and_inputs
A ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
_A = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_A = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
def _a ( self : Any ):
"""simple docstring"""
A =MobileViTModelTester(self )
A =MobileViTConfigTester(self , config_class=__A , has_text_modality=__A )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def _a ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def _a ( self : int ):
"""simple docstring"""
pass
def _a ( self : Any ):
"""simple docstring"""
        A , A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A =model_class(__A )
A =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A =[*signature.parameters.keys()]
A =["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _a ( self : Tuple ):
"""simple docstring"""
pass
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _a ( self : List[str] ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ : Any , snake_case__ : Any , snake_case__ : Optional[Any] ):
A =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
A =model(**self._prepare_for_class(__A , __A ) )
A =outputs.hidden_states
A =5
self.assertEqual(len(__A ) , __A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
A =2
for i in range(len(__A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        A , A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A =True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A =True
check_hidden_states_output(__A , __A , __A )
def _a ( self : Any ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A =MobileViTModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def UpperCamelCase_ ( ) ->Optional[int]:
A =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Any ):
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(__A )
A =self.default_image_processor
A =prepare_img()
A =image_processor(images=__A , return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
A =model(**__A )
# verify the logits
A =torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __A )
A =torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) )
@slow
def _a ( self : str ):
"""simple docstring"""
A =MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
A =model.to(__A )
A =MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
A =prepare_img()
A =image_processor(images=__A , return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
A =model(**__A )
A =outputs.logits
# verify the logits
A =torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __A )
A =torch.tensor(
[
[[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.9_868, -9.7_132], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
] , device=__A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __A , atol=1E-4 ) )
    @slow
    def test_post_processing_semantic_segmentation(self):
        """simple docstring"""
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
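
# Illustrative usage sketch, separate from the test suite above: running the same
# checkpoint end to end. "apple/mobilevit-xx-small" is the checkpoint the tests use;
# the image path is a placeholder — substitute any RGB image.
if __name__ == "__main__":
    from PIL import Image
    from transformers import MobileViTForImageClassification, MobileViTImageProcessor

    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
    classifier = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")
    pixel_inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
    predicted_id = classifier(**pixel_inputs).logits.argmax(-1).item()
    print(classifier.config.id2label[predicted_id])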
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
        "ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoFormerForCausalLM",
        "RoFormerForMaskedLM",
        "RoFormerForMultipleChoice",
        "RoFormerForQuestionAnswering",
        "RoFormerForSequenceClassification",
        "RoFormerForTokenClassification",
        "RoFormerLayer",
        "RoFormerModel",
        "RoFormerPreTrainedModel",
        "load_tf_weights_in_roformer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
        "TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRoFormerForCausalLM",
        "TFRoFormerForMaskedLM",
        "TFRoFormerForMultipleChoice",
        "TFRoFormerForQuestionAnswering",
        "TFRoFormerForSequenceClassification",
        "TFRoFormerForTokenClassification",
        "TFRoFormerLayer",
        "TFRoFormerModel",
        "TFRoFormerPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
        "FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxRoFormerForMaskedLM",
        "FlaxRoFormerForMultipleChoice",
        "FlaxRoFormerForQuestionAnswering",
        "FlaxRoFormerForSequenceClassification",
        "FlaxRoFormerForTokenClassification",
        "FlaxRoFormerModel",
        "FlaxRoFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
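
# Minimal sketch of the lazy-import idea used above (illustrative, not the real
# `_LazyModule` API): attribute access triggers the submodule import, so a plain
# `import transformers` stays cheap even with hundreds of model files.
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._attr_to_submodule = {
#                 attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
#             return getattr(module, attr)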
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
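
# Worked check (illustrative): with num_diffusion_timesteps=1000, the first beta is
# 1 - alpha_bar(0.001) / alpha_bar(0.0) ≈ 4e-5, so almost no noise is added at the
# start of the cosine schedule, and the betas grow toward max_beta at the end.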
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        """simple docstring"""
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """simple docstring"""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        """simple docstring"""
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        """simple docstring"""
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """simple docstring"""
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
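
# Illustrative usage sketch (not part of the library): one denoising loop driven by a
# stand-in "model" that returns random noise in place of a trained UNet. Shapes and
# step count are arbitrary choices for the demo.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler()
    scheduler.set_timesteps(25)
    sample = torch.randn(1, 3, 64, 64)
    for t in scheduler.timesteps:
        model_output = torch.randn(1, 3, 64, 64)  # stub for the UNet's epsilon prediction
        sample = scheduler.step(model_output, int(t), sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 64, 64])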
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    """simple docstring"""
    def _info(self):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        """simple docstring"""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
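
# Illustrative usage sketch: the same computation this metric wraps, driven through
# sacrebleu directly (the sentences are made up for the example).
if __name__ == "__main__":
    predictions = ["The cat sat on the mat."]
    references = [["The cat is sitting on the mat."]]  # one reference list per prediction
    # transpose to sacrebleu's layout: one stream per reference position
    transposed_references = [[refs[i] for refs in references] for i in range(len(references[0]))]
    print(CHRF().corpus_score(predictions, transposed_references).score)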
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )
    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs):
        """simple docstring"""
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        """simple docstring"""
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        """simple docstring"""
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        """simple docstring"""
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
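
# Illustrative usage sketch through the public `pipeline` API (downloads a checkpoint;
# "facebook/detr-resnet-50" is a real public object-detection model):
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    for pred in detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9):
        print(pred["label"], pred["score"], pred["box"])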
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
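
# Illustrative usage sketch of the classes exported above (downloads weights;
# "bigcode/gpt_bigcode-santacoder" is a real public checkpoint, the prompt is arbitrary):
if __name__ == "__main__":
    from transformers import AutoTokenizer, GPTBigCodeForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("bigcode/gpt_bigcode-santacoder")
    model = GPTBigCodeForCausalLM.from_pretrained("bigcode/gpt_bigcode-santacoder")
    inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=32)
    print(tokenizer.decode(outputs[0]))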
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """simple docstring"""
    def _check_no_duplicates_on_constructed_node(self, node):
        """simple docstring"""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")
    def construct_mapping(self, node, deep=False):
        """simple docstring"""
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
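
# Example (illustrative): for a README whose content is
#     "---\nlanguage: en\n---\n# My dataset"
# the function returns ("language: en", "# My dataset") — the YAML front matter
# separated from the markdown body. Without a front-matter block it returns
# (None, <the full content>).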
class DatasetMetadata(dict):
    """simple docstring"""

    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """simple docstring"""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()
    def to_readme(self, path: Path):
        """simple docstring"""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)
    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        """simple docstring"""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """simple docstring"""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)
    def to_yaml_string(self) -> str:
        """simple docstring"""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
# Known task categories (variable name reconstructed; the original name was lost in obfuscation)
known_task_ids = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    """simple docstring"""

    def __init__(self, parent):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """simple docstring"""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_output_embeds_base_model(self):
        """simple docstring"""
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
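
# Illustrative stand-alone usage mirroring the integration test above (downloads
# weights; requires TensorFlow to be installed, since `tf` is imported conditionally):
if __name__ == "__main__":
    model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
    input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32)  # "J'aime flaubert !"
    print(model(input_ids)[0].shape)  # (1, 8, 512)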
from manim import *
class CheckpointLoadingScene(Scene):  # descriptive placeholder; the original class name was lost in obfuscation
    """simple docstring"""

    def construct(self):
"""simple docstring"""
        # NOTE: variable names, layout directions, and colors below are a best-effort
        # reconstruction; the original positional arguments were lost in obfuscation.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)
        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)
        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)
        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        step_a = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))
        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))
            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
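
# To render the scene above (illustrative command; requires a working manim install):
#     manim -pql this_file.py CheckpointLoadingScene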
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
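
# Cross-check sketch (illustrative): the standard library yields the same n! orderings
# that the backtracking above prints, just in a different order.
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # 4! = 24 orderings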
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
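
# Brute-force cross-check sketch: evaluate every window of 13 adjacent digits directly.
# Slower, but obviously correct — handy for validating the sliding-window shortcut above.
def solution_brute_force(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))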
if __name__ == "__main__":
print(F'''{solution() = }''')
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model
    @property
    def dummy_unet_condition(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model
    @property
    def dummy_vqvae_and_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_audio_diffusion(self):
        """simple docstring"""
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
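
# Illustrative stand-alone usage of the pipeline under test (downloads a checkpoint):
#     pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
#     out = pipe(generator=torch.Generator().manual_seed(42))
#     out.images[0].save("spectrogram.png")  # the generated mel spectrogram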
def average_absolute_deviation(nums: list[int]) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
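    # Worked example (illustrative): for [1, 2, 3, 4] the mean is 2.5 and the absolute
    # deviations are 1.5, 0.5, 0.5, 1.5, so the result is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.
    print(average_absolute_deviation([1, 2, 3, 4]))  # 1.0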
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class CheckDummiesTester(unittest.TestCase):
    """simple docstring"""

    def test_find_backend(self):
        """simple docstring"""
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        """simple docstring"""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        """simple docstring"""
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        """simple docstring"""
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_change = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        coins_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_change, coins_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
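    # Illustrative check: a root holding 3 coins with two empty children needs 2 moves
    # (one coin passed down to each child).
    example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
    print(distribute_coins(example_tree))  # 2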