code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1)
---|---|---|---|---
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
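# Note (illustrative, not part of the original sample): pages without an og:image meta
# tag make soup.find(...) return None, so the ["content"] lookup would raise TypeError;
# a more defensive version could check the tag before subscripting it.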
| 711 |
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends

if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file: str) -> collections.OrderedDict:
    """Load a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
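# Illustrative example (hypothetical file contents): a vocab.txt with the three lines
# "<pad>", "<unk>", "我" loads as OrderedDict([("<pad>", 0), ("<unk>", 1), ("我", 2)]),
# i.e. every token is mapped to its line index.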
class WordpieceTokenizer:
    """Greedy longest-match-first WordPiece splitting over a fixed vocabulary."""

    def __init__(self, vocab: dict, unk_token: str = "<unk>", max_input_chars_per_word: int = 200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token: str):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
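# Illustrative example (hypothetical vocabulary, not from the sample): with
# vocab = {"unaff": 0, "able": 1}, WordpieceTokenizer(vocab).tokenize("unaffable")
# matches the longest prefix first and returns ["unaff", "able"]; a character that
# matches nothing yields unk_token and the scan advances by a single character.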
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba pre-segmentation followed by WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>",
                 pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>",
                 padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token,
                         pad_token=pad_token, unk_token=unk_token, line_token=line_token,
                         space_token=space_token, padding_side=padding_side, **kwargs)
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Remap the dedicated space/newline tokens onto the literal characters.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segments first, then WordPiece splits each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Undo the " "/"\n" remapping done in __init__ before writing the file back out.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
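# Illustrative usage (assumes the jieba backend and a local vocab file; the file name
# below is hypothetical):
#     tokenizer = CpmAntTokenizer("vocab.txt")
#     tokenizer.tokenize("今天天气真好")  # jieba segments first, then WordPiece splits each segment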
| 689 | 0 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 712 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n via trial division (Project Euler #3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
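# Illustrative checks (not in the original sample): trial division strips each prime
# factor in increasing order, so the last divisor recorded is the largest prime factor:
#     solution(13195) == 29            # 13195 = 5 * 7 * 13 * 29
#     solution(600851475143) == 6857   # the Project Euler #3 answer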
| 689 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging

logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 713 |
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 689 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging

logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 714 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dummy iterable dataset with a random stopping point, used to test IterableDatasetShard.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Shard lengths must match before comparing contents.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 715 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 689 | 0 |
import math


def fx(x: float, a: float) -> float:
    # f(x) = x^2 - a; its positive root is sqrt(a).
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Approximate sqrt(a) with Newton's method, stopping once updates fall below tolerance."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
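# Illustrative trace (not in the original sample): square_root_iterative(2) starts from
# get_initial_point(2) == 4.0 and the Newton updates go 4.0 -> 2.25 -> 1.5694... ->
# 1.4219... -> ~1.4142135, stopping once two successive values differ by < tolerance.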
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 689 | 0 |
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 717 |
def nand_gate(input_1: int, input_2: int) -> int:
    # NAND outputs 0 only when both inputs are 1.
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 689 | 0 |
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    # Element-wise max(0, x).
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 718 |
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
def count_of_possible_combinations(a_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(a_ )
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
def count_of_possible_combinations_with_dp_array(
a_ , a_ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
A =sum(
count_of_possible_combinations_with_dp_array(target - item , a_ )
for item in array )
A =answer
return answer
A =[-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(a_ , a_ )
def UpperCamelCase_ ( a_ , a_ , a_ ) ->int:
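    # Bottom-up DP: dp_array[t] counts the ordered combinations summing to t, in O(target * len(array)) time.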
A =[0] * (target + 1)
A =1
for i in range(1 , target + 1 ):
for j in range(a_ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = 3
__a = 5
__a = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 689 | 0 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
__a = 5_0_0_0_0
__a = 5_0_0_0
__a , __a = os.path.split(__file__)
__a = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
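# NOTE: `get_duration` (imported from the local `utils` module) is assumed to be a decorator that
# returns the wall-clock time of the wrapped call, which is what gets stored in the results dict below.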
@get_duration
def UpperCamelCase_ ( a_ , a_ ) ->int:
for i in range(lowercase__ ):
A =dataset[i]
@get_duration
def UpperCamelCase_ ( a_ , a_ , a_ ) ->Tuple:
for i in range(0 , len(lowercase__ ) , lowercase__ ):
A =dataset[i : i + batch_size]
@get_duration
def UpperCamelCase_ ( a_ , a_ , a_ ) ->Tuple:
with dataset.formatted_as(type=lowercase__ ):
for i in range(lowercase__ ):
A =dataset[i]
@get_duration
def UpperCamelCase_ ( a_ , a_ , a_ , a_ ) ->Optional[Any]:
with dataset.formatted_as(type=lowercase__ ):
for i in range(0 , lowercase__ , lowercase__ ):
A =dataset[i : i + batch_size]
def UpperCamelCase_ ( ) ->Union[str, Any]:
A ={"num examples": SPEED_TEST_N_EXAMPLES}
A =[
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
A =[
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
A =datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
A =generate_example_dataset(
os.path.join(lowercase__ , "dataset.arrow" ) , lowercase__ , num_examples=lowercase__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(lowercase__ ) )
A =func(lowercase__ , **lowercase__ )
print("shuffling dataset" )
A =dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowercase__ ) )
A =func(
lowercase__ , **lowercase__ )
with open(lowercase__ , "wb" ) as f:
f.write(json.dumps(lowercase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 719 |
from __future__ import annotations
import math
def UpperCamelCase_ ( a_ , a_ ) ->float:
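    # Falling product u * (u - 1) * ... * (u - i + 1), the coefficient in Newton's forward-difference formula.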
A =u
for i in range(1 , a_ ):
A =temp * (u - i)
return temp
def UpperCamelCase_ ( ) ->None:
A =int(input("enter the numbers of values: " ) )
A =[]
for _ in range(a_ ):
y.append([] )
for i in range(a_ ):
for j in range(a_ ):
y[i].append(a_ )
A =0
print("enter the values of parameters in a list: " )
A =list(map(a_ , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(a_ ):
A =float(input() )
A =int(input("enter the value to interpolate: " ) )
A =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , a_ ):
for j in range(n - i ):
A =y[j + 1][i - 1] - y[j][i - 1]
A =y[0][0]
for i in range(1 , a_ ):
summ += (ucal(a_ , a_ ) * y[0][i]) / math.factorial(a_ )
print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
| 689 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__a = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['OwlViTFeatureExtractor']
__a = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 720 |
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCamelCase_ ( a_ ) ->Any:
# getting number of pixels in the image
A , A =img.shape[0], img.shape[1]
# converting each pixel's color to its negative
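    # an equivalent vectorized form would be `img = 255 - img` (NumPy broadcasting)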
for i in range(a_ ):
for j in range(a_ ):
A =[255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__a = imread("""image_data/lena.jpg""", 1)
# convert to its negative
__a = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 689 | 0 |
def UpperCamelCase_ ( a_ ) ->List[str]:
A =len(_UpperCamelCase )
for i in range(length - 1 ):
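        # scan the unsorted suffix collection[i:] for the index of its smallest element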
A =i
for k in range(i + 1 , _UpperCamelCase ):
if collection[k] < collection[least]:
A =k
if least != i:
A , A =(collection[i], collection[least])
return collection
if __name__ == "__main__":
__a = input("""Enter numbers separated by a comma:\n""").strip()
__a = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
| 721 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
__a = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
__a = {
"""ctrl""": 2_5_6,
}
__a = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def UpperCamelCase_ ( a_ ) ->List[str]:
A =set()
A =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A =char
A =set(a_ )
return pairs
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = CONTROL_CODES
def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ):
"""simple docstring"""
super().__init__(unk_token=snake_case__ , **snake_case__ )
with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
A =json.load(snake_case__ )
A ={v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
A =merges_handle.read().split("\n" )[1:-1]
A =[tuple(merge.split() ) for merge in merges]
A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
A ={}
@property
def _a ( self : str ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : List[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : int , snake_case__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
A =tuple(snake_case__ )
A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
A =get_pairs(snake_case__ )
if not pairs:
return token
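        # greedy BPE: repeatedly merge the pair with the lowest merge rank until no known pair remains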
while True:
A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
A , A =bigram
A =[]
A =0
while i < len(snake_case__ ):
try:
A =word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A =j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A =tuple(snake_case__ )
A =new_word
if len(snake_case__ ) == 1:
break
else:
A =get_pairs(snake_case__ )
A ="@@ ".join(snake_case__ )
A =word[:-4]
A =word
return word
def _a ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
A =[]
A =re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def _a ( self : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
A =" ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
A =0
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
A =token_index
writer.write(" ".join(snake_case__ ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 689 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase__( __a , __a , unittest.TestCase ):
"""simple docstring"""
_A = StableDiffusionPanoramaPipeline
_A = TEXT_TO_IMAGE_PARAMS
_A = TEXT_TO_IMAGE_BATCH_PARAMS
_A = TEXT_TO_IMAGE_IMAGE_PARAMS
_A = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
A =DDIMScheduler()
torch.manual_seed(0 )
A =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
A =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
A =CLIPTextModel(A__ )
A =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _a ( self : str , snake_case__ : str , snake_case__ : List[str]=0 ):
"""simple docstring"""
A =torch.manual_seed(A__ )
A ={
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _a ( self : str ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =self.get_dummy_components()
A =StableDiffusionPanoramaPipeline(**A__ )
A =sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A =self.get_dummy_inputs(A__ )
A =sd_pipe(**A__ ).images
A =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A =np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self : List[Any] ):
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _a ( self : Any ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =self.get_dummy_components()
A =StableDiffusionPanoramaPipeline(**A__ )
A =sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A =self.get_dummy_inputs(A__ )
A ="french fries"
A =sd_pipe(**A__ , negative_prompt=A__ )
A =output.images
A =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A =np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self : Any ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =self.get_dummy_components()
A =StableDiffusionPanoramaPipeline(**A__ )
A =sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A =self.get_dummy_inputs(A__ )
A =sd_pipe(**A__ , view_batch_size=2 )
A =output.images
A =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A =np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =self.get_dummy_components()
A =EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" )
A =StableDiffusionPanoramaPipeline(**A__ )
A =sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A =self.get_dummy_inputs(A__ )
A =sd_pipe(**A__ ).images
A =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A =np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self : List[str] ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =self.get_dummy_components()
A =PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=A__ )
A =StableDiffusionPanoramaPipeline(**A__ )
A =sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A =self.get_dummy_inputs(A__ )
A =sd_pipe(**A__ ).images
A =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A =np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Tuple , snake_case__ : List[str]=0 ):
"""simple docstring"""
A =torch.manual_seed(A__ )
A ={
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _a ( self : List[str] ):
"""simple docstring"""
A ="stabilityai/stable-diffusion-2-base"
A =DDIMScheduler.from_pretrained(A__ , subfolder="scheduler" )
A =StableDiffusionPanoramaPipeline.from_pretrained(A__ , scheduler=A__ , safety_checker=A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
A =self.get_inputs()
A =pipe(**A__ ).images
A =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
A =np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _a ( self : Tuple ):
"""simple docstring"""
A =StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=A__ )
A =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
A =self.get_inputs()
A =pipe(**A__ ).images
A =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
A =np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self : Tuple ):
"""simple docstring"""
A =0
def callback_fn(snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ) -> None:
A =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
A =latents[0, -3:, -3:, -1]
A =np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
A =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
A =latents[0, -3:, -3:, -1]
A =np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
A =False
A ="stabilityai/stable-diffusion-2-base"
A =DDIMScheduler.from_pretrained(A__ , subfolder="scheduler" )
A =StableDiffusionPanoramaPipeline.from_pretrained(A__ , scheduler=A__ , safety_checker=A__ )
A =pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
A =self.get_inputs()
pipe(**A__ , callback=A__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self : Union[str, Any] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A ="stabilityai/stable-diffusion-2-base"
A =DDIMScheduler.from_pretrained(A__ , subfolder="scheduler" )
A =StableDiffusionPanoramaPipeline.from_pretrained(A__ , scheduler=A__ , safety_checker=A__ )
A =pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A =self.get_inputs()
A =pipe(**A__ )
A =torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 700 |
def UpperCamelCase_ ( a_ , a_ ) ->list[int]:
A =int(a_ )
# Initialize Result
A =[]
# Traverse through all denomination
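    # NOTE: the greedy choice below is optimal only for canonical coin systems (such as the INR denominations used here)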
for denomination in reversed(a_ ):
# Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__a = []
__a = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 689 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : str=13 , snake_case__ : Any=7 , snake_case__ : Union[str, Any]=6 , snake_case__ : Optional[Any]=17 , snake_case__ : Optional[Any]=23 , snake_case__ : Optional[int]=11 , snake_case__ : Any=True , ):
"""simple docstring"""
A =parent
A =batch_size
A =seq_length
A =act_dim
A =state_dim
A =hidden_size
A =max_length
A =is_training
def _a ( self : Any ):
"""simple docstring"""
A =floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
A =floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
A =floats_tensor((self.batch_size, self.seq_length, 1) )
A =floats_tensor((self.batch_size, self.seq_length, 1) )
A =ids_tensor((self.batch_size, self.seq_length) , vocab_size=10_00 )
A =random_attention_mask((self.batch_size, self.seq_length) )
A =self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def _a ( self : Any ):
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def _a ( self : List[Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str , snake_case__ : List[str] , ):
"""simple docstring"""
A =DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
A =model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def _a ( self : str ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
        A , A , A , A , A , A , A =config_and_inputs
A ={
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = (DecisionTransformerModel,) if is_torch_available() else ()
_A = ()
_A = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
_A = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
_A = False
_A = False
_A = False
_A = False
_A = False
_A = False
_A = False
_A = False
_A = False
def _a ( self : Dict ):
"""simple docstring"""
A =DecisionTransformerModelTester(self )
A =ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def _a ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Dict ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def _a ( self : str ):
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A =DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _a ( self : Dict ):
"""simple docstring"""
A , A =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A =model_class(snake_case__ )
A =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A =[*signature.parameters.keys()]
A =[
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A =2 # number of steps of autoregressive prediction we will perform
A =10 # defined by the RL environment, may be normalized
A =DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
A =model.to(snake_case__ )
A =model.config
torch.manual_seed(0 )
A =torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset()
A =torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=snake_case__ )
A =torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
A =state
A =torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa )
A =torch.zeros(1 , 0 , device=snake_case__ , dtype=torch.floataa )
A =torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(snake_case__ ):
A =torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 )
A =torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 )
A =torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
A , A , A =model(
states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
A , A , A , A =( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ),
1.0,
False,
{},
)
A =action_pred[0, -1]
A =torch.cat([states, state] , dim=1 )
A =returns_to_go[0, -1] - reward
A =torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
A =torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 701 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
__a = ["""model.decoder.embed_positions.weights"""]
def UpperCamelCase_ ( a_ ) ->List[str]:
if "emb" in name:
A =name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
A =name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
A =name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
A =name.replace("linear1" , "fc1" )
if "linear2" in name:
A =name.replace("linear2" , "fc2" )
if "norm1" in name:
A =name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
A =name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
A =name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
A =name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
A =name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def UpperCamelCase_ ( a_ , a_ ) ->Tuple[Dict, Dict]:
A =list(state_dict.keys() )
A ={}
for key in keys:
A =state_dict.pop(a_ )
A =rename_keys(a_ )
if "in_proj_weight" in key:
# split fused qkv proj
A =val[:hidden_size, :]
A =val[hidden_size : 2 * hidden_size, :]
A =val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
A =val
else:
A =val
return state_dict, enc_dec_proj_state_dict
def UpperCamelCase_ ( a_ ) ->MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
A =1024
A =24
A =16
elif checkpoint == "medium":
A =1536
A =48
A =24
elif checkpoint == "large":
A =2048
A =48
A =32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
A =MusicgenDecoderConfig(
hidden_size=a_ , ffn_dim=hidden_size * 4 , num_hidden_layers=a_ , num_attention_heads=a_ , )
return config
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_=None , a_=None , a_="cpu" ) ->Union[str, Any]:
A =MusicGen.get_pretrained(a_ , device=a_ )
A =decoder_config_from_checkpoint(a_ )
A =fairseq_model.lm.state_dict()
A , A =rename_state_dict(
a_ , hidden_size=decoder_config.hidden_size )
A =TaEncoderModel.from_pretrained("t5-base" )
A =EncodecModel.from_pretrained("facebook/encodec_32khz" )
A =MusicgenForCausalLM(a_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A , A =decoder.load_state_dict(a_ , strict=a_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(a_ )
if len(a_ ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(a_ ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
A =MusicgenForConditionalGeneration(text_encoder=a_ , audio_encoder=a_ , decoder=a_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(a_ )
# check we can do a forward pass
A =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A =input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A =model(input_ids=a_ , decoder_input_ids=a_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
A =AutoTokenizer.from_pretrained("t5-base" )
A =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
A =MusicgenProcessor(feature_extractor=a_ , tokenizer=a_ )
# set the appropriate bos/pad token ids
A =2048
A =2048
# set other default generation config params
A =int(30 * audio_encoder.config.frame_rate )
A =True
A =3.0
if pytorch_dump_folder is not None:
Path(a_ ).mkdir(exist_ok=a_ )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(a_ )
processor.push_to_hub(a_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
__a = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 | 0 |
'''simple docstring'''
__a = """\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"""
__a = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__a = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 702 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase_ ( a_ ) ->Tuple:
A =FileLock(str(tmpdir / "foo.lock" ) )
A =FileLock(str(tmpdir / "foo.lock" ) )
A =0.01
with locka.acquire():
        with pytest.raises(Timeout ):
A =time.time()
            locka.acquire(timeout )
assert time.time() - _start > timeout
def UpperCamelCase_ ( a_ ) ->List[Any]:
A ="a" * 1000 + ".lock"
A =FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
    assert not locka._lock_file.endswith(filename )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A =FileLock(tmpdir / filename )
with locka.acquire():
        with pytest.raises(Timeout ):
locka.acquire(0 )
| 689 | 0 |
def UpperCamelCase_ ( a_ , a_ ) ->int:
A =''''''
for i in table:
res += inp[i - 1]
return res
def UpperCamelCase_ ( a_ ) ->str:
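    # circular left shift of the bit string by one position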
return data[1:] + data[0]
def UpperCamelCase_ ( a_ , a_ ) ->List[str]:
A =''''''
for i in range(len(lowerCAmelCase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def UpperCamelCase_ ( a_ , a_ ) ->Optional[int]:
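    # S-box lookup: the outer bits (first and last) select the row, the two middle bits select the column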
A =int("0b" + data[0] + data[-1] , 2 )
A =int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , a_ ) ->List[Any]:
A =message[:4]
A =message[4:]
A =apply_table(lowerCAmelCase_ , lowerCAmelCase_ )
A =xor(lowerCAmelCase_ , lowerCAmelCase_ )
A =apply_sbox(lowerCAmelCase_ , temp[:4] ) # noqa: E741
A =apply_sbox(lowerCAmelCase_ , temp[4:] )
A ='''0''' * (2 - len(lowerCAmelCase_ )) + l # noqa: E741
A ='''0''' * (2 - len(lowerCAmelCase_ )) + r
A =apply_table(l + r , lowerCAmelCase_ )
A =xor(lowerCAmelCase_ , lowerCAmelCase_ )
return temp + right
if __name__ == "__main__":
__a = input("""Enter 10 bit key: """)
__a = input("""Enter 8 bit message: """)
__a = [6, 3, 7, 4, 8, 5, 1_0, 9]
__a = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
__a = [2, 4, 3, 1]
__a = [2, 6, 3, 1, 4, 8, 5, 7]
__a = [4, 1, 3, 5, 7, 2, 8, 6]
__a = [4, 1, 2, 3, 2, 3, 4, 1]
__a = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__a = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__a = apply_table(key, paa_table)
__a = temp[:5]
__a = temp[5:]
__a = left_shift(left)
__a = left_shift(right)
__a = apply_table(left + right, pa_table)
__a = left_shift(left)
__a = left_shift(right)
__a = left_shift(left)
__a = left_shift(right)
__a = apply_table(left + right, pa_table)
# encryption
__a = apply_table(message, IP)
__a = function(expansion, sa, sa, keya, temp)
__a = temp[4:] + temp[:4]
__a = function(expansion, sa, sa, keya, temp)
__a = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
__a = apply_table(CT, IP)
__a = function(expansion, sa, sa, keya, temp)
__a = temp[4:] + temp[:4]
__a = function(expansion, sa, sa, keya, temp)
__a = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 0 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__a = """."""
if __name__ == "__main__":
__a = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
__a = []
__a = []
with open(doctest_file_path) as fp:
for line in fp:
__a = line.strip()
__a = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__a = """\n""".join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 704 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__a = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__a = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__a = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
"""simple docstring"""
def _a ( self : Any ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ):
"""simple docstring"""
A =len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
A =[[refs[i] for refs in references] for i in range(snake_case__ )]
A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
A =sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 689 | 0 |
def UpperCamelCase_ ( a_ = 5000_0000 ) ->Optional[int]:
A =set()
A =int((limit - 24) ** (1 / 2) )
A =set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
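    # sieve of Eratosthenes: strike out every multiple of p starting from p * p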
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
for primea in primes:
A =primea * primea
for primea in primes:
A =primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
A =primea * primea * primea * primea
A =square + cube + tetr
if total >= limit:
break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 705 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__a = logging.get_logger(__name__)
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = ["pixel_values"]
def __init__( self : Dict , snake_case__ : bool = True , snake_case__ : Optional[Dict[str, int]] = None , snake_case__ : PILImageResampling = PILImageResampling.BILINEAR , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_55 , snake_case__ : bool = True , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , **snake_case__ : str , ):
"""simple docstring"""
super().__init__(**snake_case__ )
A =size if size is not None else {"shortest_edge": 2_56}
A =get_size_dict(snake_case__ , default_to_square=snake_case__ )
A =crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
A =get_size_dict(snake_case__ , param_name="crop_size" )
A =do_resize
A =size
A =resample
A =do_center_crop
A =crop_size
A =do_rescale
A =rescale_factor
A =do_normalize
A =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A =image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a ( self : str , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : PILImageResampling = PILImageResampling.BICUBIC , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Tuple , ):
"""simple docstring"""
A =get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
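        # rescale so the image's shorter side equals size["shortest_edge"], preserving the aspect ratio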
A =get_resize_output_image_size(snake_case__ , size=size["shortest_edge"] , default_to_square=snake_case__ )
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _a ( self : Optional[int] , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Optional[int] , ):
"""simple docstring"""
A =get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(snake_case__ , size=(size["height"], size["width"]) , data_format=snake_case__ , **snake_case__ )
def _a ( self : Any , snake_case__ : np.ndarray , snake_case__ : float , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Optional[Any] ):
"""simple docstring"""
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def _a ( self : List[str] , snake_case__ : np.ndarray , snake_case__ : Union[float, List[float]] , snake_case__ : Union[float, List[float]] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """simple docstring"""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
 | 706 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    """simple docstring"""
    def __init__( self , parent , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
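    # The 9-tuple above is unpacked positionally by each create_and_check_* helper
    # below, so their parameter order must match this return order exactly.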
    def create_and_check_flaubert_model(
        self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = TFFlaubertModel(config=config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head(
        self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = TFFlaubertWithLMHeadModel(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_qa(
        self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = TFFlaubertForQuestionAnsweringSimple(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_flaubert_sequence_classif(
        self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        model = TFFlaubertForSequenceClassification(config )
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_for_token_classification(
        self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_for_multiple_choice(
        self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
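    # Note: `token_type_ids` is reused as `langs` above because Flaubert's language
    # embeddings take the same [batch_size, seq_length] shape as token type ids.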
@require_tf
class TFFlaubertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_A = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_A = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_flaubert_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_qa( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_flaubert_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )
    def test_flaubert_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_output_embeds_base_model( self ):
        """simple docstring"""
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
        input_ids = tf.convert_to_tensor(
            [[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 5_12) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8_768_773, -1.566_555, 0.27_072_418],
                    [-1.6_920_038, -0.5_873_505, 1.9_329_599],
                    [-2.9_563_985, -1.6_993_835, 1.7_972_052],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 689 | 0 |
from __future__ import annotations
__a = """Muhammad Umer Farooq"""
__a = """MIT"""
__a = """1.0.0"""
__a = """Muhammad Umer Farooq"""
__a = """[email protected]"""
__a = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser( HTMLParser ):
    """simple docstring"""
    def __init__( self , domain: str ):
        """simple docstring"""
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain
    def handle_starttag( self , tag: str , attrs: list[tuple[str, str | None]] ):
        """simple docstring"""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url ).split("." )[-2:] )
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url ).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(domain )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
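# Example: get_domain_name("https://a.b.github.com/page") returns "github.com" --
# urlparse yields the netloc "a.b.github.com" and the last two labels are kept.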
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(F'''{len(emails)} emails found:''')
print("""\n""".join(sorted(emails)))
| 707 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree(
    sequence: list[int | str] , current_sequence: list[int | str] , index: int , index_used: list[int] , ) -> None:
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
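# Backtracking: each recursion level fixes one more position of the permutation,
# so the calls above print all n! orderings (24 lines for the numbers, 6 for the letters).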
| 689 | 0 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    s = str(n )
    return s == s[::-1]
def solution(n: int = 100_0000) -> int:
    total = 0
    for i in range(1 , n ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
            total += i
    return total
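# Example: 585 is palindromic in base 10 and in base 2 (1001001001), so it is
# one of the numbers below one million added to the total.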
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 708 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_unet( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return model
    @property
    def dummy_unet_condition( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
        return model
    @property
    def dummy_vqvae_and_unet( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        vqvae = AutoencoderKL(
            sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
        unet = UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return vqvae, unet
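    # Note: the audio-length asserts below rely on the spectrogram producing one
    # column per hop, i.e. generated audio has (x_res - 1) * hop_length samples.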
@slow
    def test_audio_diffusion( self ):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None , unet=self.dummy_unet , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 )
        audio = output.audios[0]
        image = output.images[0]
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator , steps=4 , return_dict=False )
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
        expected_slice = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        raw_audio = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(raw_audio=raw_audio , generator=generator , start_step=5 , steps=10 )
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        expected_slice = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_unet_condition , mel=mel , scheduler=scheduler )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        np.random.seed(0 )
        encoding = torch.rand((1, 1, 10) )
        output = pipe(generator=generator , encoding=encoding )
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        expected_slice = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_audio_diffusion( self ):
        """simple docstring"""
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator )
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        expected_slice = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 689 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
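# The timm/DINO checkpoint stores query, key and value as one fused qkv matrix of
# shape (3 * hidden_size, hidden_size); the slices above split it into the separate
# q/k/v projections that the HF ViT implementation expects.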
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main" , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
__a = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 709 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
    def test_find_backend( self ):
        """simple docstring"""
        simple_backend = find_backend(" if not is_torch_available():" )
        self.assertEqual(simple_backend , "torch" )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
        self.assertEqual(double_backend , "torch_and_transformers" )
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
        self.assertEqual(triple_backend , "torch_and_transformers_and_onnx" )
    def test_read_init( self ):
        """simple docstring"""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , objects )
        self.assertIn("torch_and_transformers" , objects )
        self.assertIn("flax_and_transformers" , objects )
        self.assertIn("torch_and_transformers_and_onnx" , objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel" , objects["torch"] )
        self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
        self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
        self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
        self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
        self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
    def test_create_dummy_object( self ):
        """simple docstring"""
        dummy_constant = create_dummy_object("CONSTANT" , "'torch'" )
        self.assertEqual(dummy_constant , "\nCONSTANT = None\n" )
        dummy_function = create_dummy_object("function" , "'torch'" )
        self.assertEqual(
            dummy_function , "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" )
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass" , "'torch'" )
        self.assertEqual(dummy_class , expected_dummy_class )
    def test_create_dummy_files( self ):
        """simple docstring"""
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] , expected_dummy_pytorch_file )
| 689 | 0 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns , tgt_lns , **kwargs )
    if save_path is not None:
        save_json(metrics , save_path , indent=None )
    return metrics  # these print nicely
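# Hypothetical CLI usage via fire (file names are placeholders):
#   python rouge_cli.py preds.txt refs.txt --save_path rouge.json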
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 710 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class Node:
    """simple docstring"""
    data: int
    left: Node | None = None
    right: Node | None = None
__a = namedtuple("""CoinsDistribResult""", """moves excess""")
def get_number_of_moves(root: Node | None) -> int:
    if root is None:
        return 0
    # Validation
    def count_nodes(node: Node | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: Node | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )
    # Main calculation
    def get_distrib(node: Node | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
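# Example: a root holding 3 coins with two empty children needs 2 moves
# (one coin passed to each child): get_number_of_moves(Node(3, Node(0), Node(0))) == 2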
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 | 0 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    plt.scatter(X , y , color="red" )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color="blue" )
    plt.title("Truth or Bluff (Linear Regression)" )
    plt.xlabel("Position level" )
    plt.ylabel("Salary" )
    plt.show()
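# With degree=4, PolynomialFeatures expands each position level x into
# [1, x, x**2, x**3, x**4], so the LinearRegression fitted above is effectively a quartic.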
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 711 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"""vocab_file""": """vocab.txt"""}
__a = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__a = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
def UpperCamelCase_ ( a_ ) ->List[Any]:
A =collections.OrderedDict()
with open(a_ , "r" , encoding="utf-8" ) as reader:
A =reader.readlines()
for index, token in enumerate(a_ ):
A =token.rstrip("\n" )
A =index
return vocab
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ):
"""simple docstring"""
A =vocab
A =unk_token
A =max_input_chars_per_word
def _a ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
A =list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
A =0
A =[]
while start < len(snake_case__ ):
A =len(snake_case__ )
A =None
while start < end:
A ="".join(chars[start:end] )
if substr in self.vocab:
A =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(snake_case__ )
A =end
return sub_tokens
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ["input_ids", "attention_mask"]
_A = False
def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ):
"""simple docstring"""
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
A =bod_token
A =eod_token
A =load_vocab(snake_case__ )
A =self.encoder[space_token]
A =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
A ={v: k for k, v in self.encoder.items()}
A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id( self ):
        """simple docstring"""
        return self.encoder[self.bod_token]
    @property
    def eod_token_id( self ):
        """simple docstring"""
        return self.encoder[self.eod_token]
    @property
    def newline_id( self ):
        """simple docstring"""
        return self.encoder["\n"]
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        """simple docstring"""
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode( self , token_ids , **kwargs ):
        """simple docstring"""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check( self , token ):
        """simple docstring"""
        return token in self.encoder
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        return "".join(tokens )
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x: x[1] ) )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(token + "\n" )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: List[int] = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
| 689 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__a = logging.get_logger(__name__)
class FlavaFeatureExtractor( FlavaImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 712 |
def solution(n: int = 6008_5147_5143) -> int:
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
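# Example: solution(13195) == 29, since 13195 = 5 * 7 * 13 * 29 and the loop
# keeps the last (largest) prime divisor found.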
if __name__ == "__main__":
print(F'''{solution() = }''')
| 689 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    """simple docstring"""
    def __init__( self , value: int | None = None ):
        """simple docstring"""
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
    def __repr__( self ):
        """simple docstring"""
        from pprint import pformat
        if self.left is None and self.right is None:
            return f'''\'{self.value}: {self.prior:.5}\''''
        else:
            return pformat(
                {f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} , indent=1 )
    def __str__( self ):
        """simple docstring"""
        value = str(self.value ) + " "
        left = str(self.left or "" )
        right = str(self.right or "" )
        return value + left + right
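# A treap keeps binary-search-tree order on `value` and heap order on the random
# `prior`; `split` and `merge` below preserve both invariants, so insert/erase
# need no explicit rotations.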
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left , value )
            return left, root
        else:
            root.right, right = split(root.right , value )
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end="," )
        inorder(root.right )
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print("Unknown command" )
    return root
def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 713 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor( ProcessorMixin ):
    """simple docstring"""
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: " , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio = kwargs.pop("raw_speech" )
        else:
            audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
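    # When both audio and text are supplied, the tokenized ids are attached to the
    # feature-extractor output as `labels`, which is the layout CTC training expects.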
    def pad( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("input_features" , None )
        labels = kwargs.pop("labels" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        """simple docstring"""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 689 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__a = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 714 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    print("Loading config file..." )
    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(config_path , "r" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path , str(exc ) ) )
    return config
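# The flattened keys look like "model.classification.mitv2.width_multiplier", so the
# getattr() calls below deliberately use dotted strings as the attribute names that
# setattr() stored on the Namespace.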
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_" ):
        config.num_labels = 1000
        if int(task_name.strip().split("_" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_" ):
        config.num_labels = 2_1000
        if int(task_name.strip().split("_" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_" ):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_" ):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , "model.classification.name" , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , "model.classification.mitv2.width_multiplier" , 1.0 )
    assert (
        getattr(orig_config , "model.classification.mitv2.attn_norm_layer" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , "model.classification.activation.name" , "swish" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , "model.segmentation.output_stride" , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , "model.segmentation.deeplabv3.aspp_out_channels" , 512 )
            config.aspp_dropout_prob = getattr(orig_config , "model.segmentation.deeplabv3.aspp_dropout" , 0.1 )
    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block." , "." )
        if ".conv." in k:
            k_new = k_new.replace(".conv." , ".convolution." )
        if ".norm." in k:
            k_new = k_new.replace(".norm." , ".normalization." )
        if "conv_1." in k:
            k_new = k_new.replace("conv_1." , f'''{model_prefix}conv_stem.''' )
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1." , ".expand_1x1." )
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1." , ".reduce_1x1." )
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1." , "attention." )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1." , "classifier." )
        if "seg_head." in k:
            k_new = k_new.replace("seg_head." , "segmentation_head." )
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer." , "." )
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool." , "." )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head." ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img( ):
    url ="http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im =Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint( task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    config =get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint =torch.load(checkpoint_path , map_location="cpu" )
    # load huggingface model
    if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
        model =MobileViTVaForSemanticSegmentation(config ).eval()
        base_model =False
    else:
        model =MobileViTVaForImageClassification(config ).eval()
        base_model =False
    # remove and rename some keys to load the original model
    state_dict =checkpoint
    remove_unused_keys(state_dict )
    rename_keys =create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding =image_processor(images=prepare_img() , return_tensors="pt" )
    outputs =model(**encoding )
    # verify classification model
    if task_name.startswith("imagenet" ):
        logits =outputs.logits
        predicted_class_idx =logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits =torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . """
"""\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n """
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
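# Usage sketch (the script name and local paths here are hypothetical; only `--task` has a default):
#
#   python convert_mobilevitv2_checkpoint.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf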
| 715 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
modified_files = (
    subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
joined_dirs = """|""".join(sys.argv[1:])
regex = re.compile(rF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 689 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    """simple docstring"""
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"
__a = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor =kwargs.pop("feature_extractor" )
        image_processor =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        self.char_tokenizer =tokenizer
        self.bpe_tokenizer =AutoTokenizer.from_pretrained("gpt2" )
        self.wp_tokenizer =AutoTokenizer.from_pretrained("bert-base-uncased" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            inputs =self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings =self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] =encodings["input_ids"]
            return inputs
    def batch_decode( self , sequences ):
        """simple docstring"""
        char_preds , bpe_preds , wp_preds =sequences
        batch_size =char_preds.size(0 )
        char_strs , char_scores =self._decode_helper(char_preds , "char" )
        bpe_strs , bpe_scores =self._decode_helper(bpe_preds , "bpe" )
        wp_strs , wp_scores =self._decode_helper(wp_preds , "wp" )
        final_strs =[]
        final_scores =[]
        for i in range(batch_size ):
            scores =[char_scores[i], bpe_scores[i], wp_scores[i]]
            strs =[char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index =scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out ={}
        out["generated_text"] =final_strs
        out["scores"] =final_scores
        out["char_preds"] =char_strs
        out["bpe_preds"] =bpe_strs
        out["wp_preds"] =wp_strs
        return out
    def _decode_helper( self , pred_logits , format ):
        """simple docstring"""
        if format == DecodeType.CHARACTER:
            decoder =self.char_decode
            eos_token =1  # char-level eos token id
            eos_str ="[s]"
        elif format == DecodeType.BPE:
            decoder =self.bpe_decode
            eos_token =2  # bpe-level eos token id
            eos_str ="#"
        elif format == DecodeType.WORDPIECE:
            decoder =self.wp_decode
            eos_token =1_02  # wordpiece-level eos token id
            eos_str ="[SEP]"
        else:
            raise ValueError(f'''Format {format} is not supported.''' )
        dec_strs , conf_scores =[], []
        batch_size =pred_logits.size(0 )
        batch_max_length =pred_logits.size(1 )
        _ , preds_index =pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index =preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str =decoder(preds_index )
        preds_max_prob , _ =torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob =preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos =preds_str[index].find(eos_str )
            pred =preds_str[index][:pred_eos]
            pred_index =preds_index[index].cpu().tolist()
            pred_eos_index =pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob =preds_max_prob[index][: pred_eos_index + 1]
            confidence_score =pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode( self , sequences ):
        """simple docstring"""
        decode_strs =[seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode( self , sequences ):
        """simple docstring"""
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode( self , sequences ):
        """simple docstring"""
        decode_strs =[seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
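# Minimal usage sketch (the checkpoint name is an assumption, and `model` stands for an
# MgpstrForSceneTextRecognition instance producing (char, bpe, wp) logits):
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   text = processor.batch_decode(outputs.logits)["generated_text"]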
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    """configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mobilevit"""] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mobilevit"""] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
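# The `_LazyModule` registered above defers the heavy torch/tf imports until an attribute
# is first accessed; e.g. the import below only loads the PyTorch modeling code on use:
#
#   from transformers import MobileViTForImageClassification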
| 689 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput( ModelOutput ):
    """simple docstring"""
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig( XLMRobertaConfig ):
    """simple docstring"""
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=5_12 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim =project_dim
        self.pooler_fn =pooler_fn
        self.learn_encoder =learn_encoder
        self.use_attention_mask =use_attention_mask
class RobertaSeriesModelWithTransformation( RobertaPreTrainedModel ):
    """simple docstring"""
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.roberta =XLMRobertaModel(config )
        self.transformation =nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation =getattr(config , "has_pre_transformation" , False )
        if self.has_pre_transformation:
            self.transformation_pre =nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN =nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , output_attentions=None , output_hidden_states=None , return_dict=None , ):
        """simple docstring"""
        return_dict =return_dict if return_dict is not None else self.config.use_return_dict
        outputs =self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output =outputs["hidden_states"][-2]
            sequence_output =self.pre_LN(sequence_output )
            projection_state =self.transformation_pre(sequence_output )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state =self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 717 |
def nand_gate( input_1: int , input_2: int ) ->int:
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate( ) ->None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
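# NAND is functionally complete; as an illustration (not part of the original module),
# NOT and AND can both be derived from it:
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)
def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))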
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 689 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict( checkpoint_path ):
    sd =torch.load(checkpoint_path , map_location="cpu" )
    return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    new_d =OrderedDict()
    # position-ids buffer expected by the HF implementation
    new_d["visual_bert.embeddings.position_ids"] =torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key =key
        for name_pair in rename_keys_prefix:
            new_key =new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] =d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] =new_d["cls.predictions.bias"]
    return new_d
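# e.g. with the prefixes above, "bert.bert.encoder.layer.0.attention.self.query.weight"
# is renamed to "visual_bert.encoder.layer.0.attention.self.query.weight".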
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        model_type ="pretraining"
        if "vcr" in checkpoint_path:
            config_params ={"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params ={"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params ={"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params ={"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
    else:
        if "vcr" in checkpoint_path:
            config_params ={"visual_embedding_dim": 512}
            model_type ="multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params ={"visual_embedding_dim": 2048}
            model_type ="vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params ={"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type ="vqa"
        elif "nlvr" in checkpoint_path:
            config_params ={
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type ="nlvr"
    config =VisualBertConfig(**config_params )
    # Load State Dict
    state_dict =load_state_dict(checkpoint_path )
    new_state_dict =get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model =VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model =VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model =VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model =VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 718 |
def combination_sum_iv( n: int , array: list[int] , target: int ) ->int:
    def count_of_possible_combinations( target: int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n: int , array: list[int] , target: int ) ->int:
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
        answer =sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] =answer
return answer
    dp_array =[-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n: int , array: list[int] , target: int ) ->int:
    dp_array =[0] * (target + 1)
    dp_array[0] =1
for i in range(1 , target + 1 ):
        for j in range(n ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
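# The bottom-up variant runs in O(n * target) time and O(target) space. For the sample
# below (array=[1, 2, 5], target=5) all three implementations return 9, since order
# matters: 1+2+2, 2+1+2 and 2+2+1 are counted as distinct combinations.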
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 689 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 1_2,
"""430M""": 2_4,
"""1B5""": 2_4,
"""3B""": 3_2,
"""7B""": 3_2,
"""14B""": 4_0,
}
HIDEN_SIZE_MAPPING = {
"""169M""": 7_6_8,
"""430M""": 1_0_2_4,
"""1B5""": 2_0_4_8,
"""3B""": 2_5_6_0,
"""7B""": 4_0_9_6,
"""14B""": 5_1_2_0,
}
def convert_state_dict( state_dict ):
    state_dict_keys =list(state_dict.keys() )
    for name in state_dict_keys:
        weight =state_dict.pop(name )
        # emb -> embedding
        if name.startswith("emb." ):
            name =name.replace("emb." , "embeddings." )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0" ):
            name =name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
        # att -> attention
        name =re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , name )
        # ffn -> feed_forward
        name =re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , name )
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k" ):
            name =name.replace(".time_mix_k" , ".time_mix_key" )
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v" ):
            name =name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r" ):
            name =name.replace(".time_mix_r" , ".time_mix_receptance" )
        if name != "head.weight":
            name ="rwkv." + name
        state_dict[name] =weight
    return state_dict
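# e.g. "blocks.2.att.time_mix_k" becomes "rwkv.blocks.2.attention.time_mix_key", while
# the LM head ("head.weight") keeps its name and stays outside the "rwkv." prefix.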
def convert_rmkv_checkpoint_to_hf_format( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer." )
        vocab_size =5_0277
        tokenizer =AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
    else:
        tokenizer =PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size =len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes =list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size =candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
    if size not in possible_sizes:
        raise ValueError(f'''`size` should be one of {possible_sizes}, got {size}.''' )
    config =RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file =hf_hub_download(repo_id , checkpoint_file )
    state_dict =torch.load(model_file , map_location="cpu" )
    state_dict =convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards , index =shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file =os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , "w" , encoding="utf-8" ) as f:
            content =json.dumps(index , indent=2 , sort_keys=True ) + "\n"
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model." )
    shard_files =list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict =torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub." )
        model =AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size="2GB" )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 719 |
from __future__ import annotations
import math
def ucal( u: float , p: int ) ->float:
    temp =u
    for i in range(1 , p ):
        temp =temp * (u - i)
    return temp
def main( ) ->None:
    n =int(input("enter the numbers of values: " ) )
    y: list[list[float]] =[]
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] =0
    print("enter the values of parameters in a list: " )
    x =list(map(int , input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(n ):
        y[i][0] =float(input() )
    value =int(input("enter the value to interpolate: " ) )
    u =(value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] =y[j + 1][i - 1] - y[j][i - 1]
    summ =y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f'''the value at {value} is {summ}''' )
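# Non-interactive sketch of the same forward-difference estimate (like the formula above,
# it assumes equally spaced x values; `newton_forward` is not part of the original module):
def newton_forward(x: list[float], y_values: list[float], value: float) -> float:
    n = len(x)
    # forward difference table; column 0 holds the sampled y values
    y = [[0.0] * n for _ in range(n)]
    for i in range(n):
        y[i][0] = y_values[i]
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ
# e.g. newton_forward([0, 1, 2, 3], [0, 1, 8, 27], 1.5) == 3.375, exact for the cubic x**3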
if __name__ == "__main__":
main()
| 689 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests( unittest.TestCase ):
    """simple docstring"""
    def test_swish( self ):
        """simple docstring"""
        act =get_activation("swish" )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_silu( self ):
        """simple docstring"""
        act =get_activation("silu" )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_mish( self ):
        """simple docstring"""
        act =get_activation("mish" )
        self.assertIsInstance(act , nn.Mish )
        self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_gelu( self ):
        """simple docstring"""
        act =get_activation("gelu" )
        self.assertIsInstance(act , nn.GELU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
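# All four activations satisfy f(x) -> 0 for large negative x and f(x) -> x for large
# positive x, which is what the boundary assertions above check; e.g. silu(x) = x * sigmoid(x),
# so silu(-100) is ~0 while silu(20) rounds to exactly 20.0 in float32.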
| 720 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    # getting number of pixels in the image
    pixel_h , pixel_v =img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] =[255, 255, 255] - img[i][j]
    return img
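# A vectorized equivalent (sketch) - NumPy broadcasting removes the per-pixel loop:
#
#   def convert_to_negative_fast(img):
#       return 255 - img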
if __name__ == "__main__":
# read original image
    img = imread("""image_data/lena.jpg""", 1)
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 689 | 0 |
import os
import pytest
from attr import dataclass
__a = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    """simple docstring"""
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
        'task_name': 'mnli',
        'per_device_train_batch_size': 1_6,
        'per_device_eval_batch_size': 1_6,
        'do_train': True,
        'do_eval': True,
        'do_predict': True,
        'output_dir': '/opt/ml/model',
        'overwrite_output_dir': True,
        'max_steps': 5_0_0,
        'save_steps': 5_5_0_0,
    }
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
    def metric_definitions( self ):
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name( self ):
"""simple docstring"""
return f'''{self.framework}-transfromers-test'''
@property
    def test_path( self ):
"""simple docstring"""
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
    def image_uri( self ):
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def sm_env( request ):
    request.cls.env =SageMakerTestEnvironment(framework=request.cls.framework )
| 721 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ctrl""": 2_5_6,
}
CONTROL_CODES = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def get_pairs( word ):
    pairs =set()
    prev_char =word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char =char
    pairs =set(pairs )
    return pairs
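# Example: get_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}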
class CTRLTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__( self , vocab_file , merges_file , unk_token="<unk>" , **kwargs ):
        """simple docstring"""
        super().__init__(unk_token=unk_token , **kwargs )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder =json.load(vocab_handle )
        self.decoder ={v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            merges =merges_handle.read().split("\n" )[1:-1]
        merges =[tuple(merge.split() ) for merge in merges]
        self.bpe_ranks =dict(zip(merges , range(len(merges ) ) ) )
        self.cache ={}
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word =tuple(token )
        word =tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        pairs =get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram =min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second =bigram
            new_word =[]
            i =0
            while i < len(word ):
                try:
                    j =word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i =j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word =tuple(new_word )
            word =new_word
            if len(word ) == 1:
                break
            else:
                pairs =get_pairs(word )
        word ="@@ ".join(word )
        word =word[:-4]
        self.cache[token] =word
        return word
    def _tokenize( self , text ):
        """simple docstring"""
        split_tokens =[]
        words =re.findall(R"\S+\n?" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string =" ".join(tokens ).replace("@@ " , "" ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file =os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file =os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index =0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index =token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 689 | 0 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__a = """▁"""
__a = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class BertGenerationTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        tokenizer =BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token ="<s>"
        token_id =1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys =list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 10_02 )
    def test_vocab_size( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer =BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens =tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2_85, 46, 10, 1_70, 3_82] , )
        tokens =tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids =tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens =tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer( self ):
        """simple docstring"""
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
    @slow
    def test_tokenization_base_easy_symbols( self ):
        """simple docstring"""
        symbols ="Hello World!"
        original_tokenizer_encodings =[1_85_36, 22_60, 1_01]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols( self ):
        """simple docstring"""
        symbols =(
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings =[
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model( self ):
        """simple docstring"""
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens =list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence =" ".join(first_ten_tokens )
        encoded_sequence =self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=False )
        batch_encoded_sequence =self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=False )
        config =BertGenerationConfig()
        model =BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
    @slow
    def test_tokenizer_integration( self ):
        """simple docstring"""
        # fmt: off
A ={"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 700 |
def find_minimum_change( denominations: list[int] , value: str ) ->list[int]:
    total_value =int(value )
    # Initialize Result
    answer =[]
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append to the "answer" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
        n = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
        value = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
        value = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 689 | 0 |
from __future__ import annotations
def comp_and_swap( array: list[int] , index_1: int , index_2: int , direction: int ) ->None:
    if (direction == 1 and array[index_1] > array[index_2]) or (
        direction == 0 and array[index_1] < array[index_2]
    ):
        array[index_1] , array[index_2] =array[index_2], array[index_1]
def bitonic_merge( array: list[int] , low: int , length: int , direction: int ) ->None:
    if length > 1:
        middle =int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array: list[int] , low: int , length: int , direction: int ) ->None:
    if length > 1:
        middle =int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
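# Note: classic bitonic sort assumes len(array) is a power of two; direction=1 sorts
# ascending and direction=0 descending. For example:
#
#   data = [12, 42, -21, 1]
#   bitonic_sort(data, 0, len(data), 1)  # data is now [-21, 1, 12, 42]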
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
| 701 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["""model.decoder.embed_positions.weights"""]
def rename_keys( name ):
    if "emb" in name:
        name =name.replace("emb" , "model.decoder.embed_tokens" )
    if "transformer" in name:
        name =name.replace("transformer" , "model.decoder" )
    if "cross_attention" in name:
        name =name.replace("cross_attention" , "encoder_attn" )
    if "linear1" in name:
        name =name.replace("linear1" , "fc1" )
    if "linear2" in name:
        name =name.replace("linear2" , "fc2" )
    if "norm1" in name:
        name =name.replace("norm1" , "self_attn_layer_norm" )
    if "norm_cross" in name:
        name =name.replace("norm_cross" , "encoder_attn_layer_norm" )
    if "norm2" in name:
        name =name.replace("norm2" , "final_layer_norm" )
    if "out_norm" in name:
        name =name.replace("out_norm" , "model.decoder.layer_norm" )
    if "linears" in name:
        name =name.replace("linears" , "lm_heads" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
    return name
def rename_state_dict( state_dict , hidden_size ) ->Tuple[Dict, Dict]:
    keys =list(state_dict.keys() )
    enc_dec_proj_state_dict ={}
    for key in keys:
        val =state_dict.pop(key )
        key =rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight" , "q_proj.weight" )] =val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "k_proj.weight" )] =val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "v_proj.weight" )] =val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj." ) :]] =val
        else:
            state_dict[key] =val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( checkpoint ) ->MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size =1024
        num_hidden_layers =24
        num_attention_heads =16
    elif checkpoint == "medium":
        hidden_size =1536
        num_hidden_layers =48
        num_attention_heads =24
    elif checkpoint == "large":
        hidden_size =2048
        num_hidden_layers =48
        num_attention_heads =32
    else:
        raise ValueError(f'''Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.''' )
    config =MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    fairseq_model =MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config =decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict =fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict =rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder =T5EncoderModel.from_pretrained("t5-base" )
    audio_encoder =EncodecModel.from_pretrained("facebook/encodec_32khz" )
    decoder =MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys =decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
    if len(unexpected_keys ) > 0:
        raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
    # init the composite model
    model =MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids =input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits =model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits" )
    # now construct the processor
    tokenizer =AutoTokenizer.from_pretrained("t5-base" )
    feature_extractor =AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
    processor =MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id =2048
    model.generation_config.pad_token_id =2048
    # set other default generation config params
    model.generation_config.max_length =int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample =True
    model.generation_config.guidance_scale =3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 689 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    """configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""],
    """tokenization_tapas""": ["""TapasTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tapas"""] = [
"""TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TapasForMaskedLM""",
"""TapasForQuestionAnswering""",
"""TapasForSequenceClassification""",
"""TapasModel""",
"""TapasPreTrainedModel""",
"""load_tf_weights_in_tapas""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_tapas"""] = [
"""TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFTapasForMaskedLM""",
"""TFTapasForQuestionAnswering""",
"""TFTapasForSequenceClassification""",
"""TFTapasModel""",
"""TFTapasPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ):
    lock_a =FileLock(str(tmpdir / "foo.lock" ) )
    lock_b =FileLock(str(tmpdir / "foo.lock" ) )
    timeout =0.01
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            _start =time.time()
            lock_b.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path( tmpdir ):
    filename ="a" * 1000 + ".lock"
    lock_a =FileLock(str(tmpdir / filename ) )
    assert lock_a._lock_file.endswith(".lock" )
    assert not lock_a._lock_file.endswith(filename )
    assert len(os.path.basename(lock_a._lock_file ) ) <= 255
    lock_b =FileLock(tmpdir / filename )
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            lock_b.acquire(0 )
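# Outside of tests the lock is used the same way (a sketch; `timeout` is in seconds and
# a `Timeout` is raised when the lock cannot be acquired in time):
#
#   with FileLock("resource.lock", timeout=5):
#       ...  # critical section guarded across processes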
| 689 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a = logging.get_logger(__name__)
class PoolFormerImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , crop_pct : float = 0.9 , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , rescale_factor : Union[int, float] = 1 / 2_55 , do_rescale : bool = True , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        size =size if size is not None else {'shortest_edge': 2_24}
        size =get_size_dict(size , default_to_square=False )
        crop_size =crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
        crop_size =get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize =do_resize
        self.size =size
        self.crop_pct =crop_pct
        self.resample =resample
        self.do_center_crop =do_center_crop
        self.crop_size =crop_size
        self.do_rescale =do_rescale
        self.rescale_factor =rescale_factor
        self.do_normalize =do_normalize
        self.image_mean =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std =image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , crop_pct : Optional[float] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """simple docstring"""
        size =get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f'''size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}''' )
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size =int(size["shortest_edge"] / crop_pct )
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size =int(size["height"] / crop_pct )
                else:
                    scale_size =(int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
            else:
                raise ValueError("Invalid size for resize: {}".format(size ) )
            output_size =get_resize_output_image_size(image , size=scale_size , default_to_square=False )
        else:
            if "shortest_edge" in size:
                output_size =get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
            elif "height" in size and "width" in size:
                output_size =(size['height'], size['width'])
            else:
                raise ValueError("Invalid size for resize: {}".format(size ) )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
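    # e.g. with size={"shortest_edge": 224} and crop_pct=0.9, the image is first resized so
    # that its shorter side is int(224 / 0.9) = 248 px and then center-cropped back to
    # 224 x 224 - the "resize then crop" evaluation protocol inherited from timm.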
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """simple docstring"""
        size =get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''size must contain 'height' and 'width' as keys. Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def _a ( self : Dict , snake_case__ : ImageInput , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : int = None , snake_case__ : PILImageResampling = None , snake_case__ : bool = None , snake_case__ : Dict[str, int] = None , snake_case__ : bool = None , snake_case__ : float = None , snake_case__ : bool = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : ChannelDimension = ChannelDimension.FIRST , **snake_case__ : Optional[Any] , ):
"""simple docstring"""
A =do_resize if do_resize is not None else self.do_resize
A =crop_pct if crop_pct is not None else self.crop_pct
A =resample if resample is not None else self.resample
A =do_center_crop if do_center_crop is not None else self.do_center_crop
A =do_rescale if do_rescale is not None else self.do_rescale
A =rescale_factor if rescale_factor is not None else self.rescale_factor
A =do_normalize if do_normalize is not None else self.do_normalize
A =image_mean if image_mean is not None else self.image_mean
A =image_std if image_std is not None else self.image_std
A =size if size is not None else self.size
A =get_size_dict(snake_case__ , default_to_square=snake_case__ )
A =crop_size if crop_size is not None else self.crop_size
A =get_size_dict(snake_case__ , param_name="crop_size" )
A =make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
A =[to_numpy_array(snake_case__ ) for image in images]
if do_resize:
A =[self.resize(image=snake_case__ , size=snake_case__ , crop_pct=snake_case__ , resample=snake_case__ ) for image in images]
if do_center_crop:
A =[self.center_crop(image=snake_case__ , size=snake_case__ ) for image in images]
if do_rescale:
A =[self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
A =[self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
A =[to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
        A ={"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
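# A minimal self-contained sketch (not part of the original file): the resize
# step above enlarges the shortest edge to int(shortest_edge / crop_pct) before
# the center crop, so crop_pct=0.9 keeps roughly 90% of the resized image.
# `_sketch_scale_size` is a hypothetical name used only for illustration.
def _sketch_scale_size(shortest_edge: int, crop_pct: float) -> int:
    return int(shortest_edge / crop_pct)
assert _sketch_scale_size(2_24, 0.9) == 2_48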
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
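# Illustrative sketch (not part of the original file): _LazyModule defers the
# heavy framework imports above until an attribute is first accessed. A
# minimal, self-contained version of the same idea, with hypothetical names:
import importlib
class _SketchLazyModule:
    def __init__(self, module_name: str ):
        self._name = module_name
        self._module = None
    def __getattr__(self, attr: str ):
        if self._module is None:
            # the real import only happens on first attribute access
            self._module = importlib.import_module(self._name )
        return getattr(self._module, attr )
_lazy_json = _SketchLazyModule("json" )
assert _lazy_json.dumps({"ok": True} ) == '{"ok": true}'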
| 689 | 0 |
def UpperCamelCase_ ( word , max_width ) ->list:
    A =word.split()
    def justify(line , width , max_width ) -> str:
        A =max_width - width
        A =len(line )
        if len(line ) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            A =words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            A =spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            A =(
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            A =[]
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " " )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    A =[]
    A =[]
    A =0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            A , A =[word], len(word )
    A =max_width - width - len(line )
    answer.append(" ".join(line ) + (remaining_spaces + 1) * " " )
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
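# Self-contained sketch (not part of the original file): the round-robin step
# above gives each gap floor(extra // gaps) spaces, plus one extra space for
# the leftmost (extra % gaps) gaps. Names below are illustrative only.
def _sketch_gap_widths(extra_spaces: int, gaps: int) -> list:
    base = extra_spaces // gaps
    return [base + (1 if i < extra_spaces % gaps else 0) for i in range(gaps)]
assert _sketch_gap_widths(7, 3) == [3, 2, 2]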
| 704 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__a = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__a = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__a = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
"""simple docstring"""
def _a ( self : Any ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ):
"""simple docstring"""
A =len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
A =[[refs[i] for refs in references] for i in range(snake_case__ )]
A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
A =sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
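# Self-contained sketch (not part of the original file): _compute above
# transposes one-reference-sublist-per-prediction input into sacrebleu's
# one-list-per-reference-set layout, as the README note describes. The core
# of that transpose, with hypothetical data:
refs_per_prediction = [["ref a1", "ref b1"], ["ref a2", "ref b2"]]
n_refs = len(refs_per_prediction[0] )
transposed = [[refs[i] for refs in refs_per_prediction] for i in range(n_refs )]
assert transposed == [["ref a1", "ref a2"], ["ref b1", "ref b2"]]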
| 689 | 0 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : List[str]=13 , snake_case__ : Union[str, Any]=7 , snake_case__ : int=True , snake_case__ : Optional[int]=True , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=True , snake_case__ : Optional[int]=99 , snake_case__ : Union[str, Any]=32 , snake_case__ : Dict=2 , snake_case__ : List[Any]=4 , snake_case__ : Optional[Any]=37 , snake_case__ : int="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : str=0.1 , snake_case__ : List[str]=5_12 , snake_case__ : Optional[Any]=16 , snake_case__ : Optional[Any]=2 , snake_case__ : Optional[int]=0.02 , snake_case__ : str=False , snake_case__ : int=True , snake_case__ : Any="None" , snake_case__ : Dict=3 , snake_case__ : List[Any]=4 , snake_case__ : Optional[Any]=None , ):
"""simple docstring"""
A =parent
A =batch_size
A =seq_length
A =is_training
A =use_input_mask
A =use_token_type_ids
A =use_labels
A =vocab_size
A =hidden_size
A =num_hidden_layers
A =num_attention_heads
A =intermediate_size
A =hidden_act
A =hidden_dropout_prob
A =attention_probs_dropout_prob
A =max_position_embeddings
A =type_vocab_size
A =type_sequence_label_size
A =initializer_range
A =num_labels
A =num_choices
A =relative_attention
A =position_biased_input
A =pos_att_type
A =scope
def _a ( self : Dict ):
"""simple docstring"""
A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A =None
if self.use_input_mask:
A =random_attention_mask([self.batch_size, self.seq_length] )
A =None
if self.use_token_type_ids:
A =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A =None
A =None
A =None
if self.use_labels:
A =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A =DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Dict , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Optional[Any] ):
"""simple docstring"""
A =TFDebertaVaModel(config=_A )
A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A =[input_ids, input_mask]
A =model(_A )
A =model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
A =TFDebertaVaForMaskedLM(config=_A )
A ={
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A =model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Dict , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : int ):
"""simple docstring"""
A =self.num_labels
A =TFDebertaVaForSequenceClassification(config=_A )
A ={
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A =model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Tuple , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
"""simple docstring"""
A =self.num_labels
A =TFDebertaVaForTokenClassification(config=_A )
A ={
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A =model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str ):
"""simple docstring"""
A =TFDebertaVaForQuestionAnswering(config=_A )
A ={
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A =model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Any ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) =config_and_inputs
A ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase__( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
_A = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
_A = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
def _a ( self : List[Any] ):
"""simple docstring"""
A =TFDebertaVaModelTester(self )
A =ConfigTester(self , config_class=_A , hidden_size=37 )
def _a ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[int] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _a ( self : int ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def _a ( self : Dict ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def _a ( self : Optional[int] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def _a ( self : Optional[int] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def _a ( self : Any ):
"""simple docstring"""
A =TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(_A )
@require_tf
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="Model not available yet" )
def _a ( self : List[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : int ):
"""simple docstring"""
A =TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
A =tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
A =tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
A =model(_A , attention_mask=_A )[0]
A =tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1E-4 )
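# Illustrative sketch (not part of the original file): the integration check
# above pins a 3x3 logit slice with an absolute tolerance of 1e-4; the same
# kind of comparison in plain numpy, with hypothetical values:
import numpy as np
observed = np.array([0.2_356, 0.1_948, 0.0_369] )
pinned = np.array([0.2_357, 0.1_949, 0.0_368] )
assert np.allclose(observed, pinned, atol=1E-4 )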
| 705 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 0 |
from __future__ import annotations
__a = '''Muhammad Umer Farooq'''
__a = '''MIT'''
__a = '''1.0.0'''
__a = '''Muhammad Umer Farooq'''
__a = '''[email protected]'''
__a = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class UpperCamelCase__( HTMLParser ):
"""simple docstring"""
def __init__( self : List[str] , snake_case__ : str ):
"""simple docstring"""
super().__init__()
A =[]
A =domain
def _a ( self : Dict , snake_case__ : str , snake_case__ : list[tuple[str, str | None]] ):
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
A =parse.urljoin(self.domain , snake_case__ )
self.urls.append(snake_case__ )
def UpperCamelCase_ ( a_ ) ->str:
    return ".".join(get_sub_domain_name(a_ ).split("." )[-2:] )
def UpperCamelCase_ ( a_ ) ->str:
    return parse.urlparse(a_ ).netloc
def UpperCamelCase_ ( a_ = "https://github.com" ) ->list:
    A =get_domain_name(a_ )
    # Initialize the parser
    A =Parser(domain )
    try:
        # Open URL
        A =requests.get(a_ )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        A =set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                A =requests.get(link )
                # Get the valid email.
                A =re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
__a = emails_from_url("""https://github.com""")
print(F'''{len(emails)} emails found:''')
print("""\n""".join(sorted(emails))) | 706 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : List[str] , snake_case__ : Optional[int] , ):
"""simple docstring"""
A =parent
A =13
A =7
A =True
A =True
A =True
A =True
A =True
A =False
A =False
A =False
A =2
A =99
A =0
A =32
A =2
A =4
A =0.1
A =0.1
A =5_12
A =16
A =2
A =0.02
A =3
A =4
A ="last"
A =True
A =None
A =0
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
A =None
if self.use_input_lengths:
A =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A =None
if self.use_token_type_ids:
A =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A =None
A =None
A =None
if self.use_labels:
A =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
A =ids_tensor([self.batch_size] , self.num_choices )
A =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
A =TFFlaubertModel(config=snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
A =[input_ids, input_mask]
A =model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , ):
"""simple docstring"""
A =TFFlaubertWithLMHeadModel(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , ):
"""simple docstring"""
A =TFFlaubertForQuestionAnsweringSimple(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths}
A =model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , ):
"""simple docstring"""
A =TFFlaubertForSequenceClassification(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , ):
"""simple docstring"""
A =self.num_labels
A =TFFlaubertForTokenClassification(config=snake_case__ )
A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , ):
"""simple docstring"""
A =self.num_choices
A =TFFlaubertForMultipleChoice(config=snake_case__ )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A ={
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : Any ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) =config_and_inputs
A ={
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_A = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
def _a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self : Optional[int] ):
"""simple docstring"""
A =TFFlaubertModelTester(self )
A =ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def _a ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : str ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A =TFFlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A =TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
A =tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
A =model(snake_case__ )[0]
A =tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice.
A =tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 689 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCamelCase__( __lowerCamelCase ):
"""simple docstring"""
_A = 42
_A = 42
class UpperCamelCase__( nn.Module ):
"""simple docstring"""
_A = 42
_A = (1_6, 3_2, 9_6, 2_5_6)
_A = jnp.floataa
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
A =[]
for i in range(len(self.block_out_channels ) - 1 ):
A =self.block_out_channels[i]
A =self.block_out_channels[i + 1]
A =nn.Conv(
a_ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(a_ )
A =nn.Conv(
a_ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(a_ )
A =blocks
A =nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : List[str] , snake_case__ : List[Any] ):
"""simple docstring"""
A =self.conv_in(a_ )
A =nn.silu(a_ )
for block in self.blocks:
A =block(a_ )
A =nn.silu(a_ )
A =self.conv_out(a_ )
return embedding
@flax_register_to_config
class UpperCamelCase__( nn.Module , __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
_A = 3_2
_A = 4
_A = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_A = False
_A = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_A = 2
_A = 8
_A = None
_A = 1_2_8_0
_A = 0.0
_A = False
_A = jnp.floataa
_A = True
_A = 0
_A = "rgb"
_A = (1_6, 3_2, 9_6, 2_5_6)
def _a ( self : int , snake_case__ : List[Any] ):
"""simple docstring"""
A =(1, self.in_channels, self.sample_size, self.sample_size)
A =jnp.zeros(a_ , dtype=jnp.floataa )
A =jnp.ones((1,) , dtype=jnp.intaa )
A =jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
A =(1, 3, self.sample_size * 8, self.sample_size * 8)
A =jnp.zeros(a_ , dtype=jnp.floataa )
        A , A =jax.random.split(a_ )
A ={"params": params_rng, "dropout": dropout_rng}
return self.init(a_ , a_ , a_ , a_ , a_ )["params"]
def _a ( self : str ):
"""simple docstring"""
A =self.block_out_channels
A =block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A =self.num_attention_heads or self.attention_head_dim
# input
A =nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
A =FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
A =FlaxTimestepEmbedding(a_ , dtype=self.dtype )
A =FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
A =self.only_cross_attention
if isinstance(a_ , a_ ):
A =(only_cross_attention,) * len(self.down_block_types )
if isinstance(a_ , a_ ):
A =(num_attention_heads,) * len(self.down_block_types )
# down
A =[]
A =[]
A =block_out_channels[0]
A =nn.Conv(
a_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a_ )
for i, down_block_type in enumerate(self.down_block_types ):
A =output_channel
A =block_out_channels[i]
A =i == len(a_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
A =FlaxCrossAttnDownBlockaD(
in_channels=a_ , out_channels=a_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
A =FlaxDownBlockaD(
in_channels=a_ , out_channels=a_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(a_ )
for _ in range(self.layers_per_block ):
A =nn.Conv(
a_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a_ )
if not is_final_block:
A =nn.Conv(
a_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a_ )
A =down_blocks
A =controlnet_down_blocks
# mid
A =block_out_channels[-1]
A =FlaxUNetMidBlockaDCrossAttn(
in_channels=a_ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
A =nn.Conv(
a_ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : str , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] = 1.0 , snake_case__ : Tuple = True , snake_case__ : int = False , ):
"""simple docstring"""
A =self.controlnet_conditioning_channel_order
if channel_order == "bgr":
A =jnp.flip(a_ , axis=1 )
# 1. time
if not isinstance(a_ , jnp.ndarray ):
A =jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(a_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
A =timesteps.astype(dtype=jnp.floataa )
A =jnp.expand_dims(a_ , 0 )
A =self.time_proj(a_ )
A =self.time_embedding(a_ )
# 2. pre-process
A =jnp.transpose(a_ , (0, 2, 3, 1) )
A =self.conv_in(a_ )
A =jnp.transpose(a_ , (0, 2, 3, 1) )
A =self.controlnet_cond_embedding(a_ )
sample += controlnet_cond
# 3. down
A =(sample,)
for down_block in self.down_blocks:
if isinstance(a_ , a_ ):
A =down_block(a_ , a_ , a_ , deterministic=not train )
else:
A =down_block(a_ , a_ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
A =self.mid_block(a_ , a_ , a_ , deterministic=not train )
# 5. contronet blocks
A =()
for down_block_res_sample, controlnet_block in zip(a_ , self.controlnet_down_blocks ):
A =controlnet_block(a_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
A =controlnet_down_block_res_samples
A =self.controlnet_mid_block(a_ )
# 6. scaling
A =[sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=a_ , mid_block_res_sample=a_ )
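# Illustrative sketch (not part of the original file, requires jax): the
# final scaling step above multiplies every residual tensor by
# `conditioning_scale`; reduced to its essence with hypothetical shapes:
import jax.numpy as jnp
residuals = (jnp.ones((1, 2) ), jnp.ones((1, 2) ))
scaled = [sample * 0.5 for sample in residuals]
assert float(scaled[0][0, 0] ) == 0.5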
| 707 |
from __future__ import annotations
def UpperCamelCase_ ( a_ ) ->None:
create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] )
def UpperCamelCase_ ( sequence , current_sequence , index , index_used , ) ->None:
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            A =True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
current_sequence.pop()
A =False
__a = [3, 1, 2, 4]
generate_all_permutations(sequence)
__a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
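# Self-contained sketch (not part of the original file): the backtracking
# above visits every ordering exactly once, i.e. n! permutations;
# cross-checking the count against itertools:
import itertools
import math
assert len(list(itertools.permutations([3, 1, 2, 4] ) ) ) == math.factorial(4 )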
| 689 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class UpperCamelCase__( TaskTemplate ):
"""simple docstring"""
_A = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
_A = Features({"image": Image()} )
_A = Features({"labels": ClassLabel} )
_A = "image"
_A = "labels"
def _a ( self : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , snake_case__ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
A =copy.deepcopy(self )
A =self.label_schema.copy()
A =features[self.label_column]
A =label_schema
return task_template
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 708 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
A =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def _a ( self : int ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A =DDPMScheduler()
A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 )
A =output.audios[0]
A =output.images[0]
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
A =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A =DDIMScheduler()
A =self.dummy_vqvae_and_unet
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
A =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A =self.dummy_unet_condition
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =torch.rand((1, 1, 10) )
A =pipe(generator=snake_case__ , encoding=snake_case__ )
A =output.images[0]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =torch_device
A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ )
A =output.audios[0]
A =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 689 | 0 |
from collections.abc import Callable
import numpy as np
def UpperCamelCase_ ( ode_func , ya , xa , step_size , x_end ) ->np.ndarray:
    A =int(np.ceil((x_end - xa) / step_size ) )
    A =np.zeros((n + 1,) )
    A =ya
    A =xa
    for k in range(n ):
        A =y[k] + step_size * ode_func(x , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
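# Self-contained example (not part of the original file): solving y' = y with
# y(0) = 1 via the scheme above approximates e at x = 1; the global error of
# explicit Euler shrinks linearly with the step size. Names are illustrative.
import numpy as np
def _sketch_euler(ode_func, ya, xa, step_size, x_end ):
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k] )
        x += step_size
    return y
assert abs(_sketch_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0 )[-1] - np.e) < 0.01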
| 709 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A =find_backend(" if not is_torch_available():" )
self.assertEqual(snake_case__ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A =find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A =find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers_and_onnx" )
def _a ( self : List[Any] ):
"""simple docstring"""
A =read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , snake_case__ )
self.assertIn("torch_and_transformers" , snake_case__ )
self.assertIn("flax_and_transformers" , snake_case__ )
self.assertIn("torch_and_transformers_and_onnx" , snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def _a ( self : Dict ):
"""simple docstring"""
A =create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(snake_case__ , "\nCONSTANT = None\n" )
A =create_dummy_object("function" , "'torch'" )
self.assertEqual(
snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
A =create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(snake_case__ , snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , snake_case__ )
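# Illustrative sketch (not part of the original file): find_backend above
# maps an availability-guard line to a backend name; the essential parsing
# step is a single regular expression, mirroring the tested behaviour:
import re
_match = re.search(r"is\_([a-z_]*)_available\(\)", " if not is_torch_available():" )
assert _match is not None and _match.group(1 ) == "torch"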
| 689 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class UpperCamelCase__( PretrainedConfig ):
"""simple docstring"""
_A = "swinv2"
_A = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self : int , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size =image_size
        self.patch_size =patch_size
        self.num_channels =num_channels
        self.embed_dim =embed_dim
        self.depths =depths
        self.num_layers =len(depths )
        self.num_heads =num_heads
        self.window_size =window_size
        self.mlp_ratio =mlp_ratio
        self.qkv_bias =qkv_bias
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.drop_path_rate =drop_path_rate
        self.hidden_act =hidden_act
        self.use_absolute_embeddings =use_absolute_embeddings
        self.layer_norm_eps =layer_norm_eps
        self.initializer_range =initializer_range
        self.encoder_stride =encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size =int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes =(0, 0, 0, 0)
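        # Worked example (added for illustration, not part of the original config):
        # with the defaults embed_dim=96 and depths=[2, 2, 6, 2], the derived
        # hidden size is 96 * 2 ** 3 = 768, the channel width of the last stage.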
| 710 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__:
"""simple docstring"""
    data: int
    left: UpperCamelCase__ | None = None
    right: UpperCamelCase__ | None = None
CoinsDistribResult = namedtuple("""CoinsDistribResult""", """moves excess""")
def UpperCamelCase_ ( root ) ->int:
    if root is None:
        return 0
    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )
    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess =get_distrib(node.left )
        right_distrib_moves , right_distrib_excess =get_distrib(node.right )
        coins_to_left =1 - left_distrib_excess
        coins_to_right =1 - right_distrib_excess
        result_moves =(
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess =node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
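    # Usage sketch (added for illustration; assumes the (data, left, right)
    # fields defined above): a root holding 3 coins with two empty leaves
    # needs exactly one move per leaf:
    #     root = UpperCamelCase__(3, UpperCamelCase__(0), UpperCamelCase__(0))
    #     assert UpperCamelCase_(root) == 2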
| 689 | 0 |
def UpperCamelCase_ ( list_data , length = 0 ) ->list:
    length =length or len(list_data )
    swapped =False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] =list_data[i + 1], list_data[i]
            swapped =True
    return list_data if not swapped else UpperCamelCase_(list_data , length - 1 )
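# Sanity check (illustrative addition, not in the original file): recursion
# stops as soon as a full pass makes no swaps.
assert UpperCamelCase_([3, 1, 2] ) == [1, 2, 3]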
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"""vocab_file""": """vocab.txt"""}
__a = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__a = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
def UpperCamelCase_ ( a_ ) ->List[Any]:
A =collections.OrderedDict()
with open(a_ , "r" , encoding="utf-8" ) as reader:
A =reader.readlines()
for index, token in enumerate(a_ ):
A =token.rstrip("\n" )
A =index
return vocab
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ):
"""simple docstring"""
A =vocab
A =unk_token
A =max_input_chars_per_word
def _a ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
A =list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
A =0
A =[]
while start < len(snake_case__ ):
A =len(snake_case__ )
A =None
while start < end:
A ="".join(chars[start:end] )
if substr in self.vocab:
A =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(snake_case__ )
A =end
return sub_tokens
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ["input_ids", "attention_mask"]
_A = False
def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ):
"""simple docstring"""
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
A =bod_token
A =eod_token
A =load_vocab(snake_case__ )
A =self.encoder[space_token]
A =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
A ={v: k for k, v in self.encoder.items()}
A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _a ( self : Dict ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def _a ( self : Any ):
"""simple docstring"""
return self.encoder["\n"]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def _a ( self : Tuple , snake_case__ : int ):
        """simple docstring"""
        output_tokens =[]
        for x in jieba.cut(snake_case__ , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _a ( self : List[Any] , snake_case__ : List[Any] , **kwargs : str ):
        """simple docstring"""
        token_ids =[i for i in snake_case__ if i >= 0]
        token_ids =[
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def _a ( self : List[Any] , snake_case__ : int ):
        """simple docstring"""
        return snake_case__ in self.encoder
def _a ( self : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
return "".join(snake_case__ )
def _a ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Dict , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
    def _a ( self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        if os.path.isdir(save_directory ):
            vocab_file =os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        else:
            vocab_file =(filename_prefix + "-" if filename_prefix else "") + save_directory
        index =0
        if " " in self.encoder:
            self.encoder["</_>"] =self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] =self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder =collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        " Please check that the vocabulary is not corrupted!" )
                    index =token_index
                writer.write(token + "\n" )
                index += 1
        return (vocab_file,)
    def _a ( self : Any , token_ids_0 : List[int] , token_ids_1 : List[int] = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def _a ( self : Optional[int] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
| 689 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def UpperCamelCase_ ( a_ ) ->str:
A ="huggingface/label-files"
A ="imagenet-1k-id2label.json"
A =json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
A ={int(__snake_case ): v for k, v in idalabel.items()}
A ={v: k for k, v in idalabel.items()}
A ="std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
A =BitConfig(
conv_layer=__snake_case , num_labels=1000 , idalabel=__snake_case , labelaid=__snake_case , )
return config
def UpperCamelCase_ ( a_ ) ->Dict:
if "stem.conv" in name:
A =name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
A =name.replace("blocks" , "layers" )
if "head.fc" in name:
A =name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
A ="bit." + name
if "bit" not in name and "classifier" not in name:
A ="bit.encoder." + name
return name
def UpperCamelCase_ ( ) ->Dict:
A ="http://images.cocodataset.org/val2017/000000039769.jpg"
A =Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( a_ , a_ , a_=False ) ->Union[str, Any]:
A =get_config(__snake_case )
# load original model from timm
A =create_model(__snake_case , pretrained=__snake_case )
timm_model.eval()
# load state_dict of original model
A =timm_model.state_dict()
for key in state_dict.copy().keys():
A =state_dict.pop(__snake_case )
A =val.squeeze() if "head" in key else val
# load HuggingFace model
A =BitForImageClassification(__snake_case )
model.eval()
model.load_state_dict(__snake_case )
# create image processor
A =create_transform(**resolve_data_config({} , model=__snake_case ) )
A =transform.transforms
A ={
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
A =BitImageProcessor(
do_resize=__snake_case , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__snake_case , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=__snake_case , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
A =prepare_img()
A =transform(__snake_case ).unsqueeze(0 )
A =processor(__snake_case , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(__snake_case , __snake_case )
# verify logits
with torch.no_grad():
A =model(__snake_case )
A =outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
A =timm_model(__snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
__a = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 712 |
def solution( a_ = 6008_5147_5143 ) ->int:
    try:
        n =int(a_ )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i =2
    ans =0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans =i
        while n % i == 0:
            n =n // i
        i += 1
    return int(ans )
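# Worked example (added for illustration): 13195 = 5 * 7 * 13 * 29, so its
# largest prime factor is 29.
assert solution(13195 ) == 29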
if __name__ == "__main__":
print(F'''{solution() = }''')
| 689 | 0 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__a = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class UpperCamelCase__( TestCase ):
"""simple docstring"""
_A = None
_A = None
    def _a ( self : List[Any] , dataset : Any , config_name : Optional[Any] ):
        """simple docstring"""
        with TemporaryDirectory() as tmp_dir:
            dataset_module =dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls =import_main_class(dataset_module.module_path , dataset=True )
            builder_instance =builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url ="""/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , "/" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path =cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def UpperCamelCase_ ( tmp_path_factory ):
    tmp_dir =tmp_path_factory.mktemp("test_hf_gcp" ) / """test_wikipedia_simple"""
    dataset_module =dataset_module_factory("wikipedia" , cache_dir=tmp_dir )
    builder_cls =import_main_class(dataset_module.module_path )
    builder_instance =builder_cls(
        cache_dir=tmp_dir , config_name="20220301.frr" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare =None
    builder_instance.download_and_prepare()
    ds =builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def UpperCamelCase_ ( tmp_path ):
    dataset_module =dataset_module_factory("wikipedia" , cache_dir=tmp_path )
    builder_cls =import_main_class(dataset_module.module_path , dataset=True )
    builder_instance =builder_cls(
        cache_dir=tmp_path , config_name="20220301.frr" , hash=dataset_module.hash , )
    ds =builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["train"] , IterableDataset )
    assert next(iter(ds["train"] ) )
| 713 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__( ProcessorMixin ):
"""simple docstring"""
_A = "Wav2Vec2FeatureExtractor"
_A = "AutoTokenizer"
    def __init__( self : List[Any] , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor =self.feature_extractor
        self._in_target_context_manager =False
    @classmethod
    def _a ( cls : List[str] , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: " , FutureWarning , )
            feature_extractor =WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer =WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__( self : Optional[Any] , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio =kwargs.pop("raw_speech" )
        else:
            audio =kwargs.pop("audio" , None )
        sampling_rate =kwargs.pop("sampling_rate" , None )
        text =kwargs.pop("text" , None )
        if len(args ) > 0:
            audio =args[0]
            args =args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs =self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings =self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] =encodings["input_ids"]
            return inputs
    def _a ( self : Tuple , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features =kwargs.pop("input_features" , None )
        labels =kwargs.pop("labels" , None )
        if len(args ) > 0:
            input_features =args[0]
            args =args[1:]
        if input_features is not None:
            input_features =self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels =self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] =labels["input_ids"]
            return input_features
    def _a ( self : List[str] , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def _a ( self : List[str] , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@contextmanager
def _a ( self : int ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
        self._in_target_context_manager =True
        self.current_processor =self.tokenizer
        yield
        self.current_processor =self.feature_extractor
        self._in_target_context_manager =False
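# Migration sketch for the deprecation above (illustrative, hypothetical
# variable names): pass the transcript via `text=` in a single call instead of
# using `as_target_processor`:
#     batch = processor(audio=speech, sampling_rate=16_000, text=transcript)
#     # batch then already carries the tokenized transcript under "labels"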
| 689 | 0 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution( target = 200_0000 ) ->int:
    triangle_numbers =[0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product =0
    # the area corresponding to the grid that gives the product closest to target
    area =0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate =(-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor =floor(b_estimate )
        b_ceil =ceil(b_estimate )
        triangle_b_first_guess =triangle_numbers[b_floor]
        triangle_b_second_guess =triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product =triangle_b_first_guess * triangle_a
            area =idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product =triangle_b_second_guess * triangle_a
            area =idx_a * b_ceil
    return area
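# Why the b_estimate formula above works (derivation sketch): an a x b grid
# contains T(a) * T(b) axis-aligned rectangles, where T(n) = n * (n + 1) / 2.
# Setting T(b) = target / T(a) and solving the quadratic for b gives
# b = (-1 + sqrt(1 + 8 * target / T(a))) / 2.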
if __name__ == "__main__":
print(F'''{solution() = }''')
| 714 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCamelCase__( PretrainedConfig ):
"""simple docstring"""
_A = "informer"
_A = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : int , snake_case__ : Optional[int] = None , snake_case__ : Optional[int] = None , snake_case__ : str = "student_t" , snake_case__ : str = "nll" , snake_case__ : int = 1 , snake_case__ : List[int] = None , snake_case__ : Optional[Union[str, bool]] = "mean" , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : Optional[List[int]] = None , snake_case__ : Optional[List[int]] = None , snake_case__ : int = 64 , snake_case__ : int = 32 , snake_case__ : int = 32 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : bool = True , snake_case__ : str = "gelu" , snake_case__ : float = 0.05 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : int = 1_00 , snake_case__ : float = 0.02 , snake_case__ : Union[str, Any]=True , snake_case__ : str = "prob" , snake_case__ : int = 5 , snake_case__ : bool = True , **snake_case__ : List[Any] , ):
"""simple docstring"""
A =prediction_length
A =context_length or prediction_length
A =distribution_output
A =loss
A =input_size
A =num_time_features
A =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
A =scaling
A =num_dynamic_real_features
A =num_static_real_features
A =num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
A =cardinality
else:
A =[0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
A =embedding_dimension
else:
A =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A =num_parallel_samples
# Transformer architecture configuration
A =input_size * len(self.lags_sequence ) + self._number_of_features
A =d_model
A =encoder_attention_heads
A =decoder_attention_heads
A =encoder_ffn_dim
A =decoder_ffn_dim
A =encoder_layers
A =decoder_layers
A =dropout
A =attention_dropout
A =activation_dropout
A =encoder_layerdrop
A =decoder_layerdrop
A =activation_function
A =init_std
A =use_cache
# Informer
A =attention_type
A =sampling_factor
A =distil
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def _a ( self : str ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
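# Illustrative note (hypothetical values): with two static categorical
# features of cardinalities [32, 12], the min(50, (cat + 1) // 2) rule above
# yields embedding dimensions [16, 6].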
| 715 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
modified_files = (
    subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
joined_dirs = """|""".join(sys.argv[1:])
regex = re.compile(rF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 689 | 0 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 637_8137.0
AXIS_B = 635_6752.31_4245
EQUATORIAL_RADIUS = 6_3_7_8_1_3_7
def UpperCamelCase_ ( lat_1 , lon_1 , lat_2 , lon_2 ) ->float:
    flattening =(AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat_1 =atan((1 - flattening) * tan(radians(lat_1 ) ) )
    b_lat_2 =atan((1 - flattening) * tan(radians(lat_2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma =haversine_distance(lat_1 , lon_1 , lat_2 , lon_2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value =(b_lat_1 + b_lat_2) / 2
    q_value =(b_lat_2 - b_lat_1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator =(sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator =cos(sigma / 2 ) ** 2
    x_value =(sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator =(cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator =sin(sigma / 2 ) ** 2
    y_value =(sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
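# Note (added for illustration): flattening = (AXIS_A - AXIS_B) / AXIS_A is
# about 1 / 298.257, the WGS-84 Earth flattening, and b_lat_1 / b_lat_2 are
# the corresponding parametric (reduced) latitudes.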
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    """configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mobilevit"""] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mobilevit"""] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
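    # Usage note (sketch, not part of the original module): importing the
    # package stays cheap; an attribute such as MobileViTModel only triggers
    # the heavy torch import on first access through _LazyModule.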
| 689 | 0 |
'''simple docstring'''
def count_inversions_bf( arr ) ->int:
    num_inversions =0
    n =len(arr )
    for i in range(n - 1 ):
        for j in range(i + 1 , n ):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive( arr ):
    if len(arr ) <= 1:
        return arr, 0
    mid =len(arr ) // 2
    p =arr[0:mid]
    q =arr[mid:]
    a , inversion_p =count_inversions_recursive(p )
    b , inversions_q =count_inversions_recursive(q )
    c , cross_inversions =_count_cross_inversions(a , b )
    num_inversions =inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions( p , q ):
    r =[]
    i =j =num_inversion =0
    while i < len(p ) and j < len(q ):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p ) - i
            r.append(q[j] )
            j += 1
        else:
            r.append(p[i] )
            i += 1
    if i < len(p ):
        r.extend(p[i:] )
    else:
        r.extend(q[j:] )
    return r, num_inversion
def main() ->None:
    arr_a =[10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf =count_inversions_bf(arr_a )
    _ , num_inversions_recursive =count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = " , num_inversions_bf )
    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    num_inversions_bf =count_inversions_bf(arr_a )
    _ , num_inversions_recursive =count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = " , num_inversions_bf )
    # an empty list should also have zero inversions
    arr_a =[]
    num_inversions_bf =count_inversions_bf(arr_a )
    _ , num_inversions_recursive =count_inversions_recursive(arr_a )
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = " , num_inversions_bf )
if __name__ == "__main__":
main()
| 717 |
def nand_gate( input_a , input_b ) ->int:
    return int((input_a, input_b).count(0 ) != 0 )
def UpperCamelCase_ ( ) ->None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 689 | 0 |
def UpperCamelCase_ ( number ) ->int:
    if not isinstance(number , int ) or number < 0:
        raise ValueError("Input must be a non-negative integer" )
    count =0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
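# Worked example (added for illustration): 25 == 0b11001 has three set bits,
# and each `number &= number - 1` clears exactly one of them (Kernighan's
# trick), so the loop body runs three times.
assert UpperCamelCase_(25 ) == 3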
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
def combination_sum_iv( n , array , target ) ->int:
    def count_of_possible_combinations(target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_with_dp_array( n , array , target ) ->int:
    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer =sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] =answer
        return answer
    dp_array =[-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n , array , target ) ->int:
    dp_array =[0] * (target + 1)
    dp_array[0] =1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
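# Note (added for illustration): all three variants agree; the bottom-up table
# runs in O(target * n) time and O(target) space. For array [1, 2, 5] and
# target 5 there are 9 ordered combinations.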
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 689 | 0 |
import random
class Onepad:
    """simple docstring"""
    @staticmethod
    def encrypt( text ):
        """simple docstring"""
        plain =[ord(i ) for i in text]
        cipher =[]
        key =[]
        for i in plain:
            k =random.randint(1 , 3_00 )
            c =(i + k) * k
            cipher.append(c )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt( cipher , key ):
        """simple docstring"""
        plain =[]
        for i in range(len(key ) ):
            p =int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c, k = Onepad().encrypt("""Hello""")
print(c, k)
print(Onepad().decrypt(c, k))
| 719 |
from __future__ import annotations
import math
def ucal( u , p ) ->float:
    temp =u
    for i in range(1 , p ):
        temp =temp * (u - i)
    return temp
def main() ->None:
    n =int(input("enter the numbers of values: " ) )
    y =[]
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
    summ =0
    print("enter the values of parameters in a list: " )
    x =list(map(int , input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(n ):
        y[i][0] =float(input() )
    value =int(input("enter the value to interpolate: " ) )
    u =(value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] =y[j + 1][i - 1] - y[j][i - 1]
    summ =y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
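    # The loop in main() evaluates Newton's forward-difference formula (sketch):
    #     f(x) ~= y0 + u * dy0 + u * (u - 1) / 2! * d2y0 + ...,  u = (x - x0) / h
    # where ucal(u, i) supplies the falling product u * (u - 1) * ... * (u - i + 1).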
| 689 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@property
    def _a ( self : Optional[Any] ):
        """simple docstring"""
        torch.manual_seed(0 )
        model =UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    @property
    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        torch.manual_seed(0 )
        model =VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
        return model
    @property
    def _a ( self : Optional[int] ):
        """simple docstring"""
        torch.manual_seed(0 )
        config =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(config )
    def _a ( self : Optional[int] ):
        """simple docstring"""
        unet =self.dummy_uncond_unet
        scheduler =DDIMScheduler()
        vae =self.dummy_vq_model
        ldm =LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator =torch.manual_seed(0 )
        image =ldm(generator=generator , num_inference_steps=2 , output_type="numpy" ).images
        generator =torch.manual_seed(0 )
        image_from_tuple =ldm(generator=generator , num_inference_steps=2 , output_type="numpy" , return_dict=False )[0]
        image_slice =image[0, -3:, -3:, -1]
        image_from_tuple_slice =image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice =np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172] )
        tolerance =1E-2 if torch_device != "mps" else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
    def _a ( self : int ):
        """simple docstring"""
        ldm =LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator =torch.manual_seed(0 )
        image =ldm(generator=generator , num_inference_steps=5 , output_type="numpy" ).images
        image_slice =image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice =np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447] )
        tolerance =1E-2 if torch_device != "mps" else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 720 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    # getting number of pixels in the image
    pixel_h , pixel_v =img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] =[255, 255, 255] - img[i][j]
    return img
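# Note (illustrative alternative, not in the original): for uint8 NumPy images
# the same result is obtained without the Python loops via broadcasting:
#     img = 255 - img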
if __name__ == "__main__":
# read original image
__a = imread("""image_data/lena.jpg""", 1)
# convert to its negative
__a = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 689 | 0 |
__a = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__a = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__a = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 721 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
__a = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
__a = {
"""ctrl""": 2_5_6,
}
__a = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def get_pairs( word ):
    pairs =set()
    prev_char =word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char =char
    pairs =set(pairs )
    return pairs
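# e.g. get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}; these symbol
# bigrams are what the BPE merge loop below repeatedly collapses.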
class UpperCamelCase__( PreTrainedTokenizer ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = CONTROL_CODES
    def __init__( self : Optional[Any] , vocab_file : Any , merges_file : List[Any] , unk_token : Optional[int]="<unk>" , **kwargs : List[str] ):
        """simple docstring"""
        super().__init__(unk_token=unk_token , **kwargs )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder =json.load(vocab_handle )
        self.decoder ={v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            merges =merges_handle.read().split("\n" )[1:-1]
        merges =[tuple(merge.split() ) for merge in merges]
        self.bpe_ranks =dict(zip(merges , range(len(merges ) ) ) )
        self.cache ={}
@property
def _a ( self : str ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : List[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def _a ( self : int , token : Any ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word =tuple(token )
        word =tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        pairs =get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram =min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second =bigram
            new_word =[]
            i =0
            while i < len(word ):
                try:
                    j =word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i =j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word =tuple(new_word )
            word =new_word
            if len(word ) == 1:
                break
            else:
                pairs =get_pairs(word )
        word ="@@ ".join(word )
        word =word[:-4]
        self.cache[token] =word
        return word
    def _a ( self : List[str] , text : int ):
        """simple docstring"""
        split_tokens =[]
        words =re.findall(R"\S+\n?" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens
def _a ( self : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
    def _a ( self : Optional[int] , snake_case__ : Any ):
        """simple docstring"""
        out_string =" ".join(snake_case__ ).replace("@@ " , "" ).strip()
        return out_string
    def _a ( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file =os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file =os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index =0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index =token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 689 | 0 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    # getting number of pixels in the image
    pixel_h , pixel_v =img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] =[255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
__a = imread("""image_data/lena.jpg""", 1)
# convert to its negative
__a = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 700 |
def find_minimum_change( denominations , value ) ->list[int]:
    total_value =int(value )
    # Initialize Result
    answer =[]
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
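# e.g. find_minimum_change([1, 2, 5, 10], "18") -> [10, 5, 2, 1]. The greedy
# choice is optimal here because these denominations form a canonical coin
# system; for arbitrary denominations it can miss the true minimum.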
# Driver Code
if __name__ == "__main__":
__a = []
__a = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 689 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCamelCase_ ( a_ , a_=False ) ->List[Any]:
A =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A =[(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def UpperCamelCase_ ( a_ , a_ , a_=False ) ->int:
for i in range(config.num_hidden_layers ):
if base_model:
A =""
else:
A ="vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A =state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' )
A =state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A =in_proj_weight[
: config.hidden_size, :
]
A =in_proj_bias[: config.hidden_size]
A =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A =in_proj_weight[
-config.hidden_size :, :
]
A =in_proj_bias[-config.hidden_size :]
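# Note (added for clarity): the timm checkpoint stores q, k and v as one fused
# (3 * hidden_size, hidden_size) matrix per block; the slices above copy its
# rows into the separate query/key/value projections of the HF ViT layers.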
def UpperCamelCase_ ( a_ ) ->Dict:
A =["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def UpperCamelCase_ ( a_ ) ->int:
# projection head is used in the self-supervised pre-training in MSN,
# for downstream task it's not needed.
A =[
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def UpperCamelCase_ ( a_ , a_ , a_ ) ->Optional[int]:
A =dct.pop(_lowerCAmelCase )
A =val
def UpperCamelCase_ ( a_ , a_ ) ->Dict:
A =ViTMSNConfig()
A =1000
A ="datasets/huggingface/label-files"
A ="imagenet-1k-id2label.json"
A =json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase ) , "r" ) )
A ={int(_lowerCAmelCase ): v for k, v in idalabel.items()}
A =idalabel
A ={v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
A =384
A =1536
A =6
elif "l16" in checkpoint_url:
A =1024
A =4096
A =24
A =16
A =0.1
elif "b4" in checkpoint_url:
A =4
elif "l7" in checkpoint_url:
A =7
A =1024
A =4096
A =24
A =16
A =0.1
A =ViTMSNModel(_lowerCAmelCase )
A =torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" )["target_encoder"]
A =ViTImageProcessor(size=config.image_size )
remove_projection_head(_lowerCAmelCase )
A =create_rename_keys(_lowerCAmelCase , base_model=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , base_model=_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
A ="http://images.cocodataset.org/val2017/000000039769.jpg"
A =Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
A =ViTImageProcessor(
size=config.image_size , image_mean=_lowerCAmelCase , image_std=_lowerCAmelCase )
A =image_processor(images=_lowerCAmelCase , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
A =model(**_lowerCAmelCase )
A =outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
A =torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
A =torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
A =torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
A =torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
A =torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _lowerCAmelCase , atol=1E-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
__a = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
if "emb" in name:
A =name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
A =name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
A =name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
A =name.replace("linear1" , "fc1" )
if "linear2" in name:
A =name.replace("linear2" , "fc2" )
if "norm1" in name:
A =name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
A =name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
A =name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
A =name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
A =name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
A =name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
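
# --- Illustration (not part of the original conversion script) ---
# Example of the substitutions performed by `rename_keys` above on one
# fairseq-style key (the input key is a made-up example):
def _demo_rename_keys():
    assert rename_keys("transformer.layers.0.linear1.weight") == "model.decoder.layers.0.fc1.weight"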
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
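
# --- Illustration (not part of the original conversion script) ---
# The "split fused qkv proj" step above slices a single (3 * hidden, hidden)
# projection matrix into separate q/k/v matrices. A minimal sketch:
def _demo_split_fused_qkv(hidden_size: int = 4):
    fused = torch.randn(3 * hidden_size, hidden_size)
    q = fused[:hidden_size, :]
    k = fused[hidden_size : 2 * hidden_size, :]
    v = fused[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)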
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        r"""Shape of each individual event constructed by this object."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        r"""Number of event dimensions, i.e., length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        r"""A float with a valid numeric value when computing the log-loss of the distribution."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        r"""Return the projection layer mapping the input to the distribution parameters."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        r"""Converts arguments to the right shape and domain."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        r"""Maps inputs to the positive orthant via the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student-T distribution output class."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal distribution output class."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative Binomial distribution output class."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # We cannot scale the negative binomial with an affine transformation,
    # since it should return integers. Instead we scale the parameters.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
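
# --- Illustration (not part of the original module) ---
# `squareplus` maps any real input to a strictly positive output, which is
# why the output classes above use it to constrain scale/df/total_count:
def _demo_squareplus():
    x = torch.tensor([-5.0, 0.0, 5.0])
    y = (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
    assert torch.all(y > 0)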
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock2.acquire():
        with pytest.raises(Timeout):
            lock1.acquire(0)
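
# --- Illustration (not part of the original tests) ---
# The usage pattern exercised above: a lock acquired as a context manager is
# held for the duration of the block, and a concurrent acquire on the same
# lock file with a short timeout raises Timeout.
def _demo_filelock_usage(tmpdir):
    lock = FileLock(str(tmpdir / "demo.lock"))
    with lock.acquire():
        pass  # the lock file is held only inside this block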
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
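
# --- Illustration (not part of the original tests) ---
# The Flax/PyTorch equivalence checks above compare outputs element-wise
# against a tolerance; the same style of comparison on toy arrays:
def _demo_almost_equal(tol: float = 4e-2):
    a = np.array([1.00, 2.00])
    b = np.array([1.01, 2.01])
    assert np.max(np.abs(a - b)) < tol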
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """
    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        """
        Compute the decay factor for the exponential moving average.
        """
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """
        Copy current averaged parameters into the given collection of parameters.
        """
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        r"""Move internal buffers of the ExponentialMovingAverage to `device`."""
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
        r"""Returns the state of the ExponentialMovingAverage as a dict."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        r"""Save the current parameters for restoring later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        r"""Restore the parameters stored with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        r"""Loads the ExponentialMovingAverage state from a dict."""
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__a = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__a = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__a = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
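
# --- Illustration (not part of the original metric) ---
# `_compute` above transposes `references` from one-sublist-per-prediction
# into sacrebleu's one-list-per-reference-position layout:
def _demo_transpose_references():
    references = [["ref a1", "ref a2"], ["ref b1", "ref b2"]]
    per_position = [[refs[i] for refs in references] for i in range(2)]
    assert per_position == [["ref a1", "ref b1"], ["ref a2", "ref b2"]]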
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 707 |
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])
def create_state_space_tree(sequence: list, current_sequence: list, index: int, index_used: list) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
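# Quick sanity sketch: the state-space tree above has n! leaves, so a
# 2-element input prints exactly two orderings.
generate_all_permutations(["x", "y"])  # prints ['x', 'y'] then ['y', 'x']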
| 689 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 708 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
A =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def _a ( self : int ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A =DDPMScheduler()
A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 )
A =output.audios[0]
A =output.images[0]
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
A =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A =DDIMScheduler()
A =self.dummy_vqvae_and_unet
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
A =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A =self.dummy_unet_condition
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =torch.rand((1, 1, 10) )
A =pipe(generator=snake_case__ , encoding=snake_case__ )
A =output.images[0]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =torch_device
A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ )
A =output.audios[0]
A =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 689 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (7_2_0, 1_2_8_0)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 1_0_0
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 2_5_0
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
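# The label files are expected in YOLO format: `class cx cy w h`, all relative
# to the image size. As a worked example, the line `0 0.5 0.5 0.2 0.4` becomes
# the corner-style box [0, 0.4, 0.3, 0.6, 0.7] via the conversion above.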
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 709 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 689 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
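# In end-to-end use these constraints feed constrained beam search; a sketch
# (model, tokenizer and inputs are placeholders, not part of this test file):
#   flexible_phrases = tokenizer(["rains", "rained"], add_special_tokens=False).input_ids
#   constraint = DisjunctiveConstraint(flexible_phrases)
#   model.generate(input_ids, constraints=[constraint], num_beams=4)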
| 710 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1
    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data
    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")
    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)
    return get_distrib(root)[0]
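# Worked example: a root holding 3 coins with two empty children needs one
# coin moved to each child, i.e. two moves in total.
example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(example_tree) == 2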
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""CLIPFeatureExtractor"""]
__a = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"""vocab_file""": """vocab.txt"""}
__a = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__a = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
def UpperCamelCase_ ( a_ ) ->List[Any]:
A =collections.OrderedDict()
with open(a_ , "r" , encoding="utf-8" ) as reader:
A =reader.readlines()
for index, token in enumerate(a_ ):
A =token.rstrip("\n" )
A =index
return vocab
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ):
"""simple docstring"""
A =vocab
A =unk_token
A =max_input_chars_per_word
def _a ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
A =list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
A =0
A =[]
while start < len(snake_case__ ):
A =len(snake_case__ )
A =None
while start < end:
A ="".join(chars[start:end] )
if substr in self.vocab:
A =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(snake_case__ )
A =end
return sub_tokens
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ["input_ids", "attention_mask"]
_A = False
def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ):
"""simple docstring"""
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
A =bod_token
A =eod_token
A =load_vocab(snake_case__ )
A =self.encoder[space_token]
A =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
A ={v: k for k, v in self.encoder.items()}
A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _a ( self : Dict ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def _a ( self : Any ):
"""simple docstring"""
return self.encoder["\n"]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : Tuple , snake_case__ : int ):
"""simple docstring"""
A =[]
for x in jieba.cut(snake_case__ , cut_all=snake_case__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
return output_tokens
def _a ( self : List[Any] , snake_case__ : List[Any] , **snake_case__ : str ):
"""simple docstring"""
A =[i for i in token_ids if i >= 0]
A =[
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(snake_case__ , **snake_case__ )
def _a ( self : List[Any] , snake_case__ : int ):
"""simple docstring"""
return token in self.encoder
def _a ( self : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
return "".join(snake_case__ )
def _a ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Dict , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if os.path.isdir(snake_case__ ):
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
A =(filename_prefix + "-" if filename_prefix else "") + save_directory
A =0
if " " in self.encoder:
A =self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
A =self.encoder["\n"]
del self.encoder["\n"]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
A =token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ ))
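# Intended usage of this tokenizer (a sketch; the upstream class behind this
# definition is CpmAntTokenizer and it requires the `jieba` extra):
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   tokenizer.tokenize("今天天气真好!")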
| 689 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_3_7_8_1_3_7.0
AXIS_B = 6_3_5_6_7_5_2.3_1_4_2_4_5
RADIUS = 6_3_7_8_1_3_7
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
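# Illustrative check (coordinates are the classic San Francisco to Yosemite
# test pair): the result should be roughly 254 km, though the exact figure
# depends on the ellipsoid constants above.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
assert 250_000 < haversine_distance(*SAN_FRANCISCO, *YOSEMITE) < 260_000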
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
def solution(n: int = 6008_5147_5143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
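# Sanity checks (Project Euler 3): 13195 = 5 * 7 * 13 * 29, so its largest
# prime factor is 29; a prime input is its own largest prime factor.
assert solution(13_195) == 29
assert solution(17) == 17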
if __name__ == "__main__":
print(F'''{solution() = }''')
| 689 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class UpperCamelCase__:
"""simple docstring"""
_A = BlenderbotConfig
_A = {}
_A = "gelu"
def __init__( self : int , snake_case__ : List[str] , snake_case__ : Any=13 , snake_case__ : int=7 , snake_case__ : List[Any]=True , snake_case__ : List[str]=False , snake_case__ : Optional[int]=99 , snake_case__ : int=32 , snake_case__ : int=2 , snake_case__ : Optional[int]=4 , snake_case__ : List[str]=37 , snake_case__ : Optional[int]=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : Any=20 , snake_case__ : Any=2 , snake_case__ : Optional[int]=1 , snake_case__ : int=0 , ):
"""simple docstring"""
A =parent
A =batch_size
A =seq_length
A =is_training
A =use_labels
A =vocab_size
A =hidden_size
A =num_hidden_layers
A =num_attention_heads
A =intermediate_size
A =hidden_dropout_prob
A =attention_probs_dropout_prob
A =max_position_embeddings
A =eos_token_id
A =pad_token_id
A =bos_token_id
def _a ( self : int ):
"""simple docstring"""
A =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A =tf.concat([input_ids, eos_tensor] , axis=1 )
A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A =prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def _a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] ):
"""simple docstring"""
A =TFBlenderbotModel(config=_UpperCAmelCase ).get_decoder()
A =inputs_dict['''input_ids''']
A =input_ids[:1, :]
A =inputs_dict['''attention_mask'''][:1, :]
A =inputs_dict['''head_mask''']
A =1
# first forward pass
A =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
A =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A =ids_tensor((self.batch_size, 3) , config.vocab_size )
A =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A =tf.concat([input_ids, next_tokens] , axis=-1 )
A =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
A =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A =output_from_no_past[:, -3:, random_slice_idx]
A =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 )
def UpperCamelCase_ ( a_ , a_ , a_ , a_=None , a_=None , a_=None , a_=None , a_=None , ) ->int:
if attention_mask is None:
A =tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase__( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
_A = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
_A = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
_A = (
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
_A = True
_A = False
_A = False
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =TFBlenderbotModelTester(self )
A =ConfigTester(self , config_class=_UpperCAmelCase )
def _a ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : int ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_tokenizers
@require_tf
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
_A = ["My friends are cool but they eat too many carbs."]
_A = "facebook/blenderbot-400M-distill"
@cached_property
def _a ( self : int ):
"""simple docstring"""
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
A =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.tokenizer(self.src_text , return_tensors="tf" )
A =self.model.generate(
model_inputs.input_ids , )
A =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_UpperCAmelCase )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 713 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "Wav2Vec2FeatureExtractor"
_A = "AutoTokenizer"
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
A =self.feature_extractor
A =False
@classmethod
def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ):
"""simple docstring"""
try:
return super().from_pretrained(snake_case__ , **snake_case__ )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , snake_case__ , )
A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ )
A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ )
return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ )
def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*snake_case__ , **snake_case__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
A =kwargs.pop("raw_speech" )
else:
A =kwargs.pop("audio" , snake_case__ )
A =kwargs.pop("sampling_rate" , snake_case__ )
A =kwargs.pop("text" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
if text is not None:
A =self.tokenizer(snake_case__ , **snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A =encodings["input_ids"]
return inputs
def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case__ , **snake_case__ )
A =kwargs.pop("input_features" , snake_case__ )
A =kwargs.pop("labels" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if input_features is not None:
A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
if labels is not None:
A =self.tokenizer.pad(snake_case__ , **snake_case__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A =labels["input_ids"]
return input_features
def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@contextmanager
def _a ( self : int ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
A =True
A =self.tokenizer
yield
A =self.feature_extractor
A =False
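# Typical round-trip with the upstream class this file corresponds to
# (Wav2Vec2Processor); the checkpoint name is illustrative:
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text=transcription, return_tensors="pt").input_ids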
| 689 | 0 |
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
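# Worked example (Project Euler 6): for n = 10 the sum of squares is 385 and
# the square of the sum is 3025, so the difference is 2640.
assert solution(10) == 2_640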
if __name__ == "__main__":
print(F'''{solution() = }''')
| 714 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(F'''{gpu} is broken''')
raise
| 715 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 689 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
__a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
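# Example invocation (assuming this script is saved as
# convert_deit_timm_to_pytorch.py; the output directory is arbitrary):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224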
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 689 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def UpperCamelCase_ ( a_ ) ->str:
if "://" in dataset_path:
A =dataset_path.split("://" )[1]
return dataset_path
def UpperCamelCase_ ( a_ ) ->str:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
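# Usage sketch (added, illustrative): once the compression filesystems are
# registered above, fsspec resolves them by protocol name. The path below is
# hypothetical:
#
#   fs = fsspec.filesystem("gzip", fo="my_file.txt.gz")
#   with fs.open("my_file.txt") as f:
#       data = f.read()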
| 717 |
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)
def test_nand_gate() -> None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
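# NAND is functionally complete; a short sketch (added, not in the original
# file) rebuilding NOT and AND from nand_gate alone:
def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))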
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 689 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
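# Complexity note (added): the plain recursion above is exponential in the worst
# case, while both DP variants run in O(target * n) time with O(target) extra
# space. For the driver below there are 9 ordered ways to reach 5 from [1, 2, 5].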
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 689 | 0 |
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__a = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""
    model_class = UNet2DModel
    main_input_name = "sample"
@property
def _a ( self : str ):
"""simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
return {"sample": noise, "timestep": time_step}
@property
def _a ( self : str ):
"""simple docstring"""
return (3, 32, 32)
@property
def _a ( self : List[Any] ):
"""simple docstring"""
return (3, 32, 32)
def _a ( self : str ):
"""simple docstring"""
        init_dict = {
            '''block_out_channels''': (32, 64),
            '''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
            '''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
            '''attention_head_dim''': 3,
            '''out_channels''': 3,
            '''in_channels''': 3,
            '''layers_per_block''': 2,
            '''sample_size''': 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""
    model_class = UNet2DModel
    main_input_name = "sample"
@property
def _a ( self : List[Any] ):
"""simple docstring"""
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
return {"sample": noise, "timestep": time_step}
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return (4, 32, 32)
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return (4, 32, 32)
def _a ( self : Dict ):
"""simple docstring"""
        init_dict = {
            '''sample_size''': 32,
            '''in_channels''': 4,
            '''out_channels''': 4,
            '''layers_per_block''': 2,
            '''block_out_channels''': (32, 64),
            '''attention_head_dim''': 32,
            '''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
            '''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def _a ( self : Dict ):
"""simple docstring"""
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def _a ( self : Optional[int] ):
"""simple docstring"""
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def _a ( self : List[Any] ):
"""simple docstring"""
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()
        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        arr_accelerate = model_accelerate(noise, time_step)['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)['''sample''']
        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1E-3)
def _a ( self : str ):
"""simple docstring"""
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)
        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""
    model_class = UNet2DModel
    main_input_name = "sample"
@property
def _a ( self : Dict , snake_case__ : List[Any]=(32, 32) ):
"""simple docstring"""
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + snake_case__).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int64, device=torch_device)
return {"sample": noise, "timestep": time_step}
@property
def _a ( self : str ):
"""simple docstring"""
return (3, 32, 32)
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return (3, 32, 32)
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        init_dict = {
            '''block_out_channels''': [32, 64, 64, 64],
            '''in_channels''': 3,
            '''layers_per_block''': 1,
            '''out_channels''': 3,
            '''time_embedding_type''': '''fourier''',
            '''norm_eps''': 1E-6,
            '''mid_block_scale_factor''': math.sqrt(2.0),
            '''norm_num_groups''': None,
            '''down_block_types''': [
                '''SkipDownBlock2D''',
                '''AttnSkipDownBlock2D''',
                '''SkipDownBlock2D''',
                '''SkipDownBlock2D''',
            ],
            '''up_block_types''': [
                '''SkipUpBlock2D''',
                '''SkipUpBlock2D''',
                '''AttnSkipUpBlock2D''',
                '''SkipUpBlock2D''',
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
@slow
def _a ( self : List[str] ):
"""simple docstring"""
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (2_56, 2_56)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)
assert image is not None, "Make sure output is not None"
@slow
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (2_56, 2_56)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1E-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2))
def _a ( self : Dict ):
"""simple docstring"""
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1E-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2))
def _a ( self : Optional[int] ):
"""simple docstring"""
pass
| 719 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f'''the value at {value} is {summ}''')
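# Added note: main() evaluates the Newton forward-difference form
#   f(x) ~ y0 + u*dy0 + u(u-1)/2! * d^2y0 + ...,  with u = (x - x0) / (x1 - x0),
# which is exactly what the ucal(u, i) / i! terms accumulate above.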
if __name__ == "__main__":
main()
| 689 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__a = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
"""simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
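# Migration sketch (added, illustrative): new code should construct the image
# processor directly instead of this deprecated shim.
#
#   from transformers import DeformableDetrImageProcessor
#   image_processor = DeformableDetrImageProcessor()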
| 720 |
from cv2 import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
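# A vectorized alternative (added sketch; assumes `img` is a NumPy uint8 array
# as returned by imread): the per-pixel loop can be replaced by broadcasting.
def convert_to_negative_fast(img):
    return 255 - img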
if __name__ == "__main__":
# read original image
    img = imread("""image_data/lena.jpg""", 1)
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 689 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
A ="__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , lowerCamelCase__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
A ="__test_patch_submodule_successive_join__"
A ="__test_patch_submodule_successive_dirname__"
A ="__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , lowerCamelCase__ ):
with patch_submodule(_test_patching , "os.rename" , lowerCamelCase__ ):
with patch_submodule(_test_patching , "os.path.dirname" , lowerCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , lowerCamelCase__ ):
with patch_submodule(_test_patching , "os.path.join" , lowerCamelCase__ ):
with patch_submodule(_test_patching , "os.path.dirname" , lowerCamelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
| 721 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ctrl""": 2_5_6,
}
CONTROL_CODES = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class CTRLTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        """simple docstring"""
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
@property
    def vocab_size(self):
        """simple docstring"""
        return len(self.encoder)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
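    # Worked example (added, illustrative): for token "hello" the word starts as
    # ('h', 'e', 'l', 'l', 'o</w>'); each pass fuses the pair with the lowest
    # merge rank until no ranked pair remains, and the pieces are then joined
    # with "@@ " continuation markers (stripped from the last piece by word[:-4]).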
    def _tokenize(self, text):
        """simple docstring"""
        split_tokens = []
        words = re.findall(R"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 689 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "xlm-roberta-xl"

    def __init__(
        self, vocab_size=25_08_80, hidden_size=25_60, num_hidden_layers=36, num_attention_heads=32,
        intermediate_size=1_02_40, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_14, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1E-05,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    """simple docstring"""
    @property
    def inputs(self):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
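# Usage sketch (added, illustrative): the ONNX config only declares dynamic
# axes, so inspecting it is enough to see the export input spec.
#
#   onnx_config = XLMRobertaXLOnnxConfig(XLMRobertaXLConfig(), task="default")
#   print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes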
| 700 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
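# Note (added): the greedy strategy above is optimal for canonical coin systems
# such as the Indian denominations below, but not for arbitrary ones - e.g.
# denominations [1, 3, 4] and value 6 yield [4, 1, 1] instead of the optimal [3, 3].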
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
        n = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
        value = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 689 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__a = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__a = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
__a = """|""".join(sys.argv[1:])
__a = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__a = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 701 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["""model.decoder.embed_positions.weights"""]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f'''Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.''')
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''')
    if len(unexpected_keys) > 0:
        raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''')
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''')
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f'''Pushing model {checkpoint} to {repo_id}''')
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
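# Reload sketch (added; path illustrative): a converted checkpoint can be loaded
# back with the standard transformers API.
#
#   from transformers import MusicgenForConditionalGeneration, MusicgenProcessor
#   model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-small-dump")
#   processor = MusicgenProcessor.from_pretrained("./musicgen-small-dump")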
| 689 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""vinai/phobert-base""": 2_5_6,
"""vinai/phobert-large""": 2_5_6,
}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ):
        """simple docstring"""
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
    def vocab_size(self):
        """simple docstring"""
        return len(self.encoder)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """simple docstring"""
        split_tokens = []
        words = re.findall(R"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """simple docstring"""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''')
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 702 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            assert time.time() - _start > timeout
def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
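# Minimal usage sketch (added) of the FileLock API the tests above exercise:
# acquire() is a context manager, and a second holder either blocks or raises
# Timeout when a timeout is given. Path illustrative:
#
#   lock = FileLock("/tmp/demo.lock")
#   with lock.acquire(timeout=1):
#       ...  # critical section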
| 689 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__a = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `\"train\"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `\"compressed\"`)
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
"""simple docstring"""
_A = "rag"
_A = True
    def __init__(
        self,
        vocab_size=None, is_encoder_decoder=True, prefix=None,
        bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None,
        title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=3_00,
        retrieval_vector_size=7_68, retrieval_batch_size=8,
        dataset="wiki_dpr", dataset_split="train", index_name="compressed",
        index_path=None, passages_path=None, use_dummy_dataset=False,
        label_smoothing=0.0, do_marginalize=False, reduce_loss=False,
        do_deduplication=True, exclude_bos_score=False, output_retrieved=False,
        use_cache=True, forced_eos_token_id=None, **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)
@classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """simple docstring"""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
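# Usage sketch (added; model ids illustrative): a composite RAG config is built
# from a question-encoder config and a generator config via the classmethod above.
#
#   from transformers import AutoConfig
#   q_cfg = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   g_cfg = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_cfg = RagConfig.from_question_encoder_generator_configs(q_cfg, g_cfg)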
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
                f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split(), encoding="utf-8", check=True, )
assert hasattr(self , "env" )
    def create_estimator(self, instance_count):
"""simple docstring"""
        job_name = f'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )
    def save_results_as_csv(self, job_name):
        """simple docstring"""
        TrainingJobAnalytics(job_name).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''')
@parameterized.expand([(2,)] )
    def test_script(self, instance_count):
        """simple docstring"""
        estimator = self.create_estimator(instance_count)
# run training
estimator.fit()
# result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 99_99_99)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
| 704 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__a = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__a = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
__a = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equal to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
"""simple docstring"""
def _a ( self : Any ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def _a ( self : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : int = CHRF.CHAR_ORDER , snake_case__ : int = CHRF.WORD_ORDER , snake_case__ : int = CHRF.BETA , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , ):
"""simple docstring"""
A =len(references[0] )
if any(len(snake_case__ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
A =[[refs[i] for refs in references] for i in range(snake_case__ )]
A =CHRF(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
A =sb_chrf.corpus_score(snake_case__ , snake_case__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
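# Worked illustration of the reference transpose performed in `_a` above
# (illustrative strings, not from this file): with two predictions and one
# reference each,
#     references = [["ref for pred 1"], ["ref for pred 2"]]
# becomes the per-position layout sacrebleu expects,
#     transformed_references = [["ref for pred 1", "ref for pred 2"]]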
| 689 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def UpperCamelCase_ ( a_ ) ->Optional[Any]:
A =botoa.client("iam" )
A ={
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=a_ , AssumeRolePolicyDocument=json.dumps(a_ , indent=2 ) )
A ={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=a_ , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(a_ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
def UpperCamelCase_ ( a_ ) ->List[str]:
A =botoa.client("iam" )
return iam_client.get_role(RoleName=a_ )["Role"]["Arn"]
def UpperCamelCase_ ( ) ->Optional[Any]:
A =_ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , a_ , )
A =None
if credentials_configuration == 0:
A =_ask_field("Enter your AWS Profile name: [default] " , default="default" )
A =aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
A =_ask_field("AWS Access Key ID: " )
A =aws_access_key_id
A =_ask_field("AWS Secret Access Key: " )
A =aws_secret_access_key
A =_ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
A =aws_region
A =_ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , a_ , )
if role_management == 0:
A =_ask_field("Enter your IAM role name: " )
else:
A ="accelerate_sagemaker_execution_role"
print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(a_ )
A =_ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
A =None
if is_custom_docker_image:
A =_ask_field("Enter your Docker image: " , lambda a_ : str(a_ ).lower() )
A =_ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
A =None
if is_sagemaker_inputs_enabled:
A =_ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda a_ : str(a_ ).lower() , )
A =_ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
A =None
if is_sagemaker_metrics_enabled:
A =_ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda a_ : str(a_ ).lower() , )
A =_ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
A ={}
A =_ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
if use_dynamo:
A ="dynamo_"
A =_ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
A =_ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
if use_custom_options:
A =_ask_options(
"Which mode do you want to use?" , a_ , lambda a_ : TORCH_DYNAMO_MODES[int(a_ )] , default="default" , )
A =_ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
A =_ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=a_ , error_message="Please enter yes or no." , )
A ="Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
A =_ask_options(
a_ , a_ , lambda a_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(a_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
A =_ask_field(a_ , lambda a_ : str(a_ ).lower() , default="ml.p3.2xlarge" )
A =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
A =_ask_field(
"How many machines do you want use? [1]: " , a_ , default=1 , )
A =_ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=a_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=a_ , use_cpu=a_ , dynamo_config=a_ , eca_instance_type=a_ , profile=a_ , region=a_ , iam_role_name=a_ , mixed_precision=a_ , num_machines=a_ , sagemaker_inputs_file=a_ , sagemaker_metrics_file=a_ , )
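# Rough shape of the returned object; the field values below are illustrative
# only and depend entirely on the answers given above:
# SageMakerConfig(compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
#                 distributed_type=SageMakerDistributedType.NO,
#                 eca_instance_type="ml.p3.2xlarge", region="us-east-1", ...)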
| 705 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__a = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__a = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def UpperCamelCase_ ( a_ ) ->Optional[int]:
A =numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=a_ )[0]
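# MNIST headers store each field as a big-endian uint32; e.g. the image-file
# magic number 2051 (0x0803) arrives as the four bytes 00 00 08 03.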
@deprecated(a_ , "Please use tf.data to implement this functionality." )
def UpperCamelCase_ ( a_ ) ->List[str]:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=a_ ) as bytestream:
A =_readaa(a_ )
if magic != 2051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
A =_readaa(a_ )
A =_readaa(a_ )
A =_readaa(a_ )
A =bytestream.read(rows * cols * num_images )
A =numpy.frombuffer(a_ , dtype=numpy.uinta )
A =data.reshape(a_ , a_ , a_ , 1 )
return data
@deprecated(a_ , "Please use tf.one_hot on tensors." )
def UpperCamelCase_ ( a_ , a_ ) ->List[Any]:
A =labels_dense.shape[0]
A =numpy.arange(a_ ) * num_classes
A =numpy.zeros((num_labels, num_classes) )
A =1
return labels_one_hot
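# Sketch of the one-hot expansion above, assuming the flattened-index
# assignment of the classic MNIST helper: labels [1, 3] with num_classes=4
# set flat indices [0, 4] + [1, 3] = [1, 7], i.e. [[0, 1, 0, 0], [0, 0, 0, 1]].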
@deprecated(a_ , "Please use tf.data to implement this functionality." )
def UpperCamelCase_ ( a_ , a_=False , a_=10 ) ->Optional[Any]:
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=a_ ) as bytestream:
A =_readaa(a_ )
if magic != 2049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
A =_readaa(a_ )
A =bytestream.read(a_ )
A =numpy.frombuffer(a_ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(a_ , a_ )
return labels
class UpperCamelCase__:
"""simple docstring"""
@deprecated(
snake_case__ , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : str=False , snake_case__ : str=False , snake_case__ : Any=dtypes.floataa , snake_case__ : Dict=True , snake_case__ : List[Any]=None , ):
"""simple docstring"""
A , A =random_seed.get_seed(snake_case__ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
A =dtypes.as_dtype(snake_case__ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
A =1_00_00
A =one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
A =images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
A =images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
A =images.astype(numpy.floataa )
A =numpy.multiply(snake_case__ , 1.0 / 255.0 )
A =images
A =labels
A =0
A =0
@property
def _a ( self : Optional[Any] ):
"""simple docstring"""
return self._images
@property
def _a ( self : Any ):
"""simple docstring"""
return self._labels
@property
def _a ( self : Tuple ):
"""simple docstring"""
return self._num_examples
@property
def _a ( self : Dict ):
"""simple docstring"""
return self._epochs_completed
def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Tuple=False , snake_case__ : str=True ):
"""simple docstring"""
if fake_data:
A =[1] * 7_84
A =[1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(snake_case__ )],
[fake_label for _ in range(snake_case__ )],
)
A =self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
A =numpy.arange(self._num_examples )
numpy.random.shuffle(snake_case__ )
A =self.images[perma]
A =self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
A =self._num_examples - start
A =self._images[start : self._num_examples]
A =self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
A =numpy.arange(self._num_examples )
numpy.random.shuffle(snake_case__ )
A =self.images[perm]
A =self.labels[perm]
# Start next epoch
A =0
A =batch_size - rest_num_examples
A =self._index_in_epoch
A =self._images[start:end]
A =self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
A =self._index_in_epoch
return self._images[start:end], self._labels[start:end]
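    # Epoch-boundary sketch (illustrative sizes): with 100 examples and
    # batch_size=32, the fourth call finds only 4 examples left, reshuffles,
    # and returns those 4 concatenated with 28 examples of the new epoch.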
@deprecated(a_ , "Please write your own downloading logic." )
def UpperCamelCase_ ( a_ , a_ , a_ ) ->Dict:
if not gfile.Exists(a_ ):
gfile.MakeDirs(a_ )
A =os.path.join(a_ , a_ )
if not gfile.Exists(a_ ):
urllib.request.urlretrieve(a_ , a_ ) # noqa: S310
with gfile.GFile(a_ ) as f:
A =f.size()
print("Successfully downloaded" , a_ , a_ , "bytes." )
return filepath
@deprecated(
a_ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def UpperCamelCase_ ( a_ , a_=False , a_=False , a_=dtypes.floataa , a_=True , a_=5000 , a_=None , a_=DEFAULT_SOURCE_URL , ) ->List[Any]:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=a_ , one_hot=a_ , dtype=a_ , seed=a_ )
A =fake()
A =fake()
A =fake()
return _Datasets(train=a_ , validation=a_ , test=a_ )
if not source_url: # empty string check
A =DEFAULT_SOURCE_URL
A ="train-images-idx3-ubyte.gz"
A ="train-labels-idx1-ubyte.gz"
A ="t10k-images-idx3-ubyte.gz"
A ="t10k-labels-idx1-ubyte.gz"
A =_maybe_download(
a_ , a_ , source_url + train_images_file )
with gfile.Open(a_ , "rb" ) as f:
A =_extract_images(a_ )
A =_maybe_download(
a_ , a_ , source_url + train_labels_file )
with gfile.Open(a_ , "rb" ) as f:
A =_extract_labels(a_ , one_hot=a_ )
A =_maybe_download(
a_ , a_ , source_url + test_images_file )
with gfile.Open(a_ , "rb" ) as f:
A =_extract_images(a_ )
A =_maybe_download(
a_ , a_ , source_url + test_labels_file )
with gfile.Open(a_ , "rb" ) as f:
A =_extract_labels(a_ , one_hot=a_ )
if not 0 <= validation_size <= len(a_ ):
A =(
"Validation size should be between 0 and "
f'''{len(a_ )}. Received: {validation_size}.'''
)
raise ValueError(a_ )
A =train_images[:validation_size]
A =train_labels[:validation_size]
A =train_images[validation_size:]
A =train_labels[validation_size:]
A ={"dtype": dtype, "reshape": reshape, "seed": seed}
A =_DataSet(a_ , a_ , **a_ )
A =_DataSet(a_ , a_ , **a_ )
A =_DataSet(a_ , a_ , **a_ )
    return _Datasets(train=a_ , validation=a_ , test=a_ )
| 706 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : List[str] , snake_case__ : Optional[int] , ):
"""simple docstring"""
A =parent
A =13
A =7
A =True
A =True
A =True
A =True
A =True
A =False
A =False
A =False
A =2
A =99
A =0
A =32
A =2
A =4
A =0.1
A =0.1
A =5_12
A =16
A =2
A =0.02
A =3
A =4
A ="last"
A =True
A =None
A =0
def _a ( self : Optional[Any] ):
"""simple docstring"""
A =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
A =None
if self.use_input_lengths:
A =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A =None
if self.use_token_type_ids:
A =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
A =None
A =None
A =None
if self.use_labels:
A =ids_tensor([self.batch_size] , self.type_sequence_label_size )
A =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
A =ids_tensor([self.batch_size] , self.num_choices )
A =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self : Any , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
A =TFFlaubertModel(config=snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
A =[input_ids, input_mask]
A =model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any , ):
"""simple docstring"""
A =TFFlaubertWithLMHeadModel(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , ):
"""simple docstring"""
A =TFFlaubertForQuestionAnsweringSimple(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths}
A =model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any , ):
"""simple docstring"""
A =TFFlaubertForSequenceClassification(snake_case__ )
A ={"input_ids": input_ids, "lengths": input_lengths}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , ):
"""simple docstring"""
A =self.num_labels
A =TFFlaubertForTokenClassification(config=snake_case__ )
A ={"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any , ):
"""simple docstring"""
A =self.num_choices
A =TFFlaubertForMultipleChoice(config=snake_case__ )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A =tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
A ={
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : Any ):
"""simple docstring"""
A =self.prepare_config_and_inputs()
        A , A , A , A , A , A , A , A , A =config_and_inputs
A ={
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
_A = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_A = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
def _a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self : Optional[int] ):
"""simple docstring"""
A =TFFlaubertModelTester(self )
A =ConfigTester(self , config_class=snake_case__ , emb_dim=37 )
def _a ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : str ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case__ )
@slow
def _a ( self : Tuple ):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A =TFFlaubertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A =TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
A =tf.convert_to_tensor(
[[0, 1_58, 7_35, 25_92, 14_24, 67_27, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
A =model(snake_case__ )[0]
A =tf.TensorShape((1, 8, 5_12) )
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice.
A =tf.convert_to_tensor(
[
[
[-1.8_768_773, -1.566_555, 0.27_072_418],
[-1.6_920_038, -0.5_873_505, 1.9_329_599],
[-2.9_563_985, -1.6_993_835, 1.7_972_052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 689 | 0 |
def UpperCamelCase_ ( a_ , a_ ) ->str:
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
A =str(bin(a_ ) )[2:] # remove the leading "0b"
A =str(bin(a_ ) )[2:] # remove the leading "0b"
A =max(len(a_ ) , len(a_ ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(a_ ) , b_binary.zfill(a_ ) ) )
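# Worked example: 5 = 0b101 and 3 = 0b011 differ in their two low bits, so the
# character-wise comparison above yields "0b110".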
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
from __future__ import annotations
def UpperCamelCase_ ( a_ ) ->None:
create_state_space_tree(a_ , [] , 0 , [0 for i in range(len(a_ ) )] )
def UpperCamelCase_ ( a_ , a_ , a_ , a_ , ) ->None:
if index == len(a_ ):
print(a_ )
return
for i in range(len(a_ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
A =True
create_state_space_tree(a_ , a_ , index + 1 , a_ )
current_sequence.pop()
A =False
__a = [3, 1, 2, 4]
generate_all_permutations(sequence)
__a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
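# The backtracking search prints every ordering: 4! = 24 permutations for the
# integer sequence above and 3! = 6 (e.g. ['A', 'B', 'C'], ['A', 'C', 'B'],
# ...) for the string sequence.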
| 689 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__a = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
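# Nothing heavy is imported eagerly above: _LazyModule defers each submodule
# import until one of its attributes (e.g. GPTNeoModel) is first accessed.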
| 708 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def _a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
A =AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
A =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def _a ( self : int ):
"""simple docstring"""
A ="cpu" # ensure determinism for the device-dependent torch.Generator
A =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
A =DDPMScheduler()
A =AudioDiffusionPipeline(vqvae=snake_case__ , unet=self.dummy_unet , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 )
A =output.audios[0]
A =output.images[0]
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ , steps=4 , return_dict=snake_case__ )
A =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
A =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
A =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
A =DDIMScheduler()
A =self.dummy_vqvae_and_unet
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(raw_audio=snake_case__ , generator=snake_case__ , start_step=5 , steps=10 )
A =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
A =self.dummy_unet_condition
A =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=snake_case__ , mel=snake_case__ , scheduler=snake_case__ )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
np.random.seed(0 )
A =torch.rand((1, 1, 10) )
A =pipe(generator=snake_case__ , encoding=snake_case__ )
A =output.images[0]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A =torch_device
A =DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
A =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
A =torch.Generator(device=snake_case__ ).manual_seed(42 )
A =pipe(generator=snake_case__ )
A =output.audios[0]
A =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
A =np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
A =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 689 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=lowerCAmelCase__ ):
"""simple docstring"""
_A = ["onnx"]
def __init__( self : Tuple , *snake_case__ : Dict , **snake_case__ : int ):
"""simple docstring"""
requires_backends(self , ["onnx"] )
@classmethod
def _a ( cls : List[str] , *snake_case__ : Optional[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["onnx"] )
@classmethod
def _a ( cls : Tuple , *snake_case__ : List[str] , **snake_case__ : Any ):
"""simple docstring"""
requires_backends(cls , ["onnx"] )
| 709 |
import os
import sys
import unittest
__a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__a = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCamelCase__( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] ):
"""simple docstring"""
A =find_backend(" if not is_torch_available():" )
self.assertEqual(snake_case__ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
A =find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
A =find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(snake_case__ , "torch_and_transformers_and_onnx" )
def _a ( self : List[Any] ):
"""simple docstring"""
A =read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("torch" , snake_case__ )
self.assertIn("torch_and_transformers" , snake_case__ )
self.assertIn("flax_and_transformers" , snake_case__ )
self.assertIn("torch_and_transformers_and_onnx" , snake_case__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def _a ( self : Dict ):
"""simple docstring"""
A =create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(snake_case__ , "\nCONSTANT = None\n" )
A =create_dummy_object("function" , "'torch'" )
self.assertEqual(
snake_case__ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
A ="\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
A =create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(snake_case__ , snake_case__ )
def _a ( self : Tuple ):
"""simple docstring"""
A ="# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
A =create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , snake_case__ )
| 689 | 0 |
def UpperCamelCase_ ( a_ , a_ ) ->list[int]:
A =int(a_ )
# Initialize Result
A =[]
# Traverse through all denomination
for denomination in reversed(a_ ):
# Find denominations
while int(a_ ) >= int(a_ ):
total_value -= int(a_ )
answer.append(a_ ) # Append the "answers" array
return answer
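# Greedy example with the default Indian denominations below: a value of 87
# yields [50, 20, 10, 5, 2].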
# Driver Code
if __name__ == "__main__":
__a = []
__a = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 710 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class UpperCamelCase__:
"""simple docstring"""
_A = 42
_A = None
_A = None
__a = namedtuple("""CoinsDistribResult""", """moves excess""")
def UpperCamelCase_ ( a_ ) ->int:
if root is None:
return 0
# Validation
def count_nodes(a_ ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(a_ ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(a_ ) != count_coins(a_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(a_ ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
A , A =get_distrib(node.left )
A , A =get_distrib(node.right )
A =1 - left_distrib_excess
A =1 - right_distrib_excess
A =(
left_distrib_moves
+ right_distrib_moves
+ abs(a_ )
+ abs(a_ )
)
A =node.data - coins_to_left - coins_to_right
return CoinsDistribResult(a_ , a_ )
return get_distrib(a_ )[0]
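# Example: a root holding 3 coins whose two leaf children hold 0 each needs
# one coin pushed down each edge, so the function above returns 2 moves.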
if __name__ == "__main__":
import doctest
doctest.testmod()
| 689 | 0 |
from __future__ import annotations
__a = """#"""
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : List[Any] ):
"""simple docstring"""
A ={}
def _a ( self : Tuple , snake_case__ : str ):
"""simple docstring"""
A =self._trie
for char in text:
if char not in trie:
A ={}
A =trie[char]
A =True
def _a ( self : Optional[Any] , snake_case__ : str ):
"""simple docstring"""
A =self._trie
for char in prefix:
if char in trie:
A =trie[char]
else:
return []
return self._elements(snake_case__ )
def _a ( self : Union[str, Any] , snake_case__ : dict ):
"""simple docstring"""
A =[]
for c, v in d.items():
A =[" "] if c == END else [(c + s) for s in self._elements(snake_case__ )]
result.extend(snake_case__ )
return tuple(snake_case__ )
__a = Trie()
__a = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def UpperCamelCase_ ( a_ ) ->tuple:
A =trie.find_word(a_ )
return tuple(string + word for word in suffixes )
def UpperCamelCase_ ( ) ->None:
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
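# main() prints the completions of the prefix "de": a tuple containing
# 'depart ', 'detergent ', 'deer ' and 'deal ', each terminated by the space
# that stands in for the END sentinel.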
| 711 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {"""vocab_file""": """vocab.txt"""}
__a = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
__a = {
"""openbmb/cpm-ant-10b""": 1_0_2_4,
}
def UpperCamelCase_ ( a_ ) ->List[Any]:
A =collections.OrderedDict()
with open(a_ , "r" , encoding="utf-8" ) as reader:
A =reader.readlines()
for index, token in enumerate(a_ ):
A =token.rstrip("\n" )
A =index
return vocab
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Tuple , snake_case__ : int , snake_case__ : int="<unk>" , snake_case__ : Optional[Any]=2_00 ):
"""simple docstring"""
A =vocab
A =unk_token
A =max_input_chars_per_word
def _a ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
A =list(snake_case__ )
if len(snake_case__ ) > self.max_input_chars_per_word:
return [self.unk_token]
A =0
A =[]
while start < len(snake_case__ ):
A =len(snake_case__ )
A =None
while start < end:
A ="".join(chars[start:end] )
if substr in self.vocab:
A =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(snake_case__ )
A =end
return sub_tokens
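    # Greedy longest-match-first segmentation: with an illustrative vocab of
    # {"foo", "bar"} (not the real CPM-Ant one), tokenizing "foobar" first
    # tries the whole string, shrinks the window until "foo" matches, then
    # continues with "bar", yielding ["foo", "bar"].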
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ["input_ids", "attention_mask"]
_A = False
def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="<d>" , snake_case__ : Optional[int]="</d>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : List[str]="<pad>" , snake_case__ : Any="<unk>" , snake_case__ : List[str]="</n>" , snake_case__ : Any="</_>" , snake_case__ : List[str]="left" , **snake_case__ : Optional[int] , ):
"""simple docstring"""
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=snake_case__ , eod_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , unk_token=snake_case__ , line_token=snake_case__ , space_token=snake_case__ , padding_side=snake_case__ , **snake_case__ , )
A =bod_token
A =eod_token
A =load_vocab(snake_case__ )
A =self.encoder[space_token]
A =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
A ={v: k for k, v in self.encoder.items()}
A =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _a ( self : Dict ):
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def _a ( self : Any ):
"""simple docstring"""
return self.encoder["\n"]
@property
def _a ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : Tuple , snake_case__ : int ):
"""simple docstring"""
A =[]
for x in jieba.cut(snake_case__ , cut_all=snake_case__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(snake_case__ ) )
return output_tokens
def _a ( self : List[Any] , snake_case__ : List[Any] , **snake_case__ : str ):
"""simple docstring"""
A =[i for i in token_ids if i >= 0]
A =[
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(snake_case__ , **snake_case__ )
def _a ( self : List[Any] , snake_case__ : int ):
"""simple docstring"""
return token in self.encoder
def _a ( self : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
return "".join(snake_case__ )
def _a ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Dict , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if os.path.isdir(snake_case__ ):
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
A =(filename_prefix + "-" if filename_prefix else "") + save_directory
A =0
if " " in self.encoder:
A =self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
A =self.encoder["\n"]
del self.encoder["\n"]
A =collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) )
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
A =token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _a ( self : Any , snake_case__ : List[int] , snake_case__ : List[int] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ ))
return [1] + ([0] * len(snake_case__ ))
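    # e.g. for a single 3-token sequence the mask is [1, 0, 0, 0]: one <s>
    # prepended by build_inputs_with_special_tokens, then the three ordinary
    # tokens (illustrative length).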
| 689 | 0 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__a = get_logger(__name__)
class UpperCamelCase__:
"""simple docstring"""
def __init__( self : Optional[Any] , snake_case__ : Optional[str] = None ):
"""simple docstring"""
A =(
os.path.join(snake_case__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
A =Extractor
def _a ( self : Tuple , snake_case__ : str ):
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
A =os.path.abspath(snake_case__ )
return os.path.join(self.extract_dir , hash_url_to_filename(snake_case__ ) )
def _a ( self : Optional[int] , snake_case__ : str , snake_case__ : bool ):
"""simple docstring"""
return force_extract or (
not os.path.isfile(snake_case__ ) and not (os.path.isdir(snake_case__ ) and os.listdir(snake_case__ ))
)
def _a ( self : int , snake_case__ : str , snake_case__ : bool = False ):
"""simple docstring"""
A =self.extractor.infer_extractor_format(snake_case__ )
if not extractor_format:
return input_path
A =self._get_output_path(snake_case__ )
if self._do_extract(snake_case__ , snake_case__ ):
self.extractor.extract(snake_case__ , snake_case__ , snake_case__ )
return output_path
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
@classmethod
@abstractmethod
def _a ( cls : Optional[Any] , snake_case__ : Union[Path, str] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
...
@staticmethod
@abstractmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
...
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
_A = []
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : int ):
"""simple docstring"""
with open(snake_case__ , "rb" ) as f:
return f.read(snake_case__ )
@classmethod
def _a ( cls : Union[str, Any] , snake_case__ : Union[Path, str] , snake_case__ : bytes = b"" ):
"""simple docstring"""
if not magic_number:
A =max(len(snake_case__ ) for cls_magic_number in cls.magic_numbers )
try:
A =cls.read_magic_number(snake_case__ , snake_case__ )
except OSError:
return False
return any(magic_number.startswith(snake_case__ ) for cls_magic_number in cls.magic_numbers )
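    # e.g. a gzip stream starts with the two bytes 1F 8B, so comparing the
    # file's first bytes against GzipExtractor.magic_numbers ([B"\x1F\x8B"])
    # is enough to pick the right extractor.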
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
@classmethod
def _a ( cls : List[Any] , snake_case__ : Union[Path, str] , **snake_case__ : int ):
"""simple docstring"""
return tarfile.is_tarfile(snake_case__ )
@staticmethod
def _a ( snake_case__ : List[Any] , snake_case__ : int ):
"""simple docstring"""
def resolved(snake_case__ : str ) -> str:
return os.path.realpath(os.path.abspath(snake_case__ ) )
def badpath(snake_case__ : str , snake_case__ : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(snake_case__ , snake_case__ ) ).startswith(snake_case__ )
def badlink(snake_case__ : Tuple , snake_case__ : str ) -> bool:
# Links are interpreted relative to the directory containing the link
A =resolved(os.path.join(snake_case__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=snake_case__ )
A =resolved(snake_case__ )
for finfo in members:
if badpath(finfo.name , snake_case__ ):
logger.error(f'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(snake_case__ , snake_case__ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(snake_case__ , snake_case__ ):
logger.error(f'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
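    # safemembers therefore skips any entry whose resolved path (or link
    # target) would escape the extraction directory: the classic tar
    # path-traversal ("zip slip") defence.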
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
os.makedirs(snake_case__ , exist_ok=snake_case__ )
A =tarfile.open(snake_case__ )
tar_file.extractall(snake_case__ , members=TarExtractor.safemembers(snake_case__ , snake_case__ ) )
tar_file.close()
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\x1F\x8B"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
with gzip.open(snake_case__ , "rb" ) as gzip_file:
with open(snake_case__ , "wb" ) as extracted_file:
shutil.copyfileobj(snake_case__ , snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [
B"PK\x03\x04",
B"PK\x05\x06", # empty archive
B"PK\x07\x08", # spanned archive
]
@classmethod
def _a ( cls : Dict , snake_case__ : Union[Path, str] , snake_case__ : bytes = b"" ):
"""simple docstring"""
if super().is_extractable(snake_case__ , magic_number=snake_case__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(snake_case__ , "rb" ) as fp:
                endrec =_EndRecData(fp )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data =fp.read(sizeCentralDir )  # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir =struct.unpack(structCentralDir , data )  # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
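    # The check above is stricter than zipfile.is_zipfile: besides locating the
    # end-of-central-directory record, it verifies that the record points at a
    # correctly sized central directory whose first entry carries the expected
    # signature, which avoids false positives on files that merely embed "PK".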
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with zipfile.ZipFile(snake_case__ , "r" ) as zip_file:
zip_file.extractall(snake_case__ )
zip_file.close()
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
with lzma.open(snake_case__ ) as compressed_file:
with open(snake_case__ , "wb" ) as extracted_file:
shutil.copyfileobj(snake_case__ , snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"Rar!\x1a\x07\x00", B"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(snake_case__ , exist_ok=snake_case__ )
A =rarfile.RarFile(snake_case__ )
rf.extractall(snake_case__ )
rf.close()
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\x28\xb5\x2F\xFD"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
A =zstd.ZstdDecompressor()
with open(snake_case__ , "rb" ) as ifh, open(snake_case__ , "wb" ) as ofh:
dctx.copy_stream(snake_case__ , snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\x42\x5A\x68"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
with bza.open(snake_case__ , "rb" ) as compressed_file:
with open(snake_case__ , "wb" ) as extracted_file:
shutil.copyfileobj(snake_case__ , snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import pyazr
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with pyazr.SevenZipFile(snake_case__ , "r" ) as archive:
archive.extractall(snake_case__ )
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = [B"\x04\x22\x4D\x18"]
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] ):
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lza.frame
with lza.frame.open(snake_case__ , "rb" ) as compressed_file:
with open(snake_case__ , "wb" ) as extracted_file:
shutil.copyfileobj(snake_case__ , snake_case__ )
class UpperCamelCase__:
"""simple docstring"""
_A = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _a ( cls : Union[str, Any] ):
"""simple docstring"""
return max(
            len(extractor_magic_number )
for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _a ( snake_case__ : Union[Path, str] , snake_case__ : int ):
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(snake_case__ , magic_number_length=snake_case__ )
except OSError:
return b""
@classmethod
def _a ( cls : Optional[int] , snake_case__ : Union[Path, str] , snake_case__ : bool = False ):
"""simple docstring"""
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=snake_case__ , )
A =cls.infer_extractor_format(snake_case__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _a ( cls : int , snake_case__ : Union[Path, str] ): # <Added version="2.4.0"/>
"""simple docstring"""
A =cls._get_magic_number_max_length()
A =cls._read_magic_number(snake_case__ , snake_case__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(snake_case__ , magic_number=snake_case__ ):
return extractor_format
@classmethod
def _a ( cls : Union[str, Any] , snake_case__ : Union[Path, str] , snake_case__ : Union[Path, str] , snake_case__ : Optional[str] = None , snake_case__ : Optional[BaseExtractor] = "deprecated" , ):
"""simple docstring"""
os.makedirs(os.path.dirname(snake_case__ ) , exist_ok=snake_case__ )
# Prevent parallel extractions
A =str(Path(snake_case__ ).with_suffix(".lock" ) )
with FileLock(snake_case__ ):
shutil.rmtree(snake_case__ , ignore_errors=snake_case__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(snake_case__ , snake_case__ ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=snake_case__ , )
A =extractor if extractor != "deprecated" else extractor_format
else:
A =cls.extractors[extractor_format]
return extractor.extract(snake_case__ , snake_case__ )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=snake_case__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(snake_case__ ):
return extractor.extract(snake_case__ , snake_case__ )
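# Hypothetical usage sketch of the dispatch above (un-obfuscated names assumed):
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")   # -> "gzip"
#   Extractor.extract("archive.tar.gz", "out_dir", extractor_format=fmt)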
| 712 |
def UpperCamelCase_ ( a_ = 6008_5147_5143 ) ->int:
    try:
        n =int(a_ )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i =2
    ans =0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans =i
        while n % i == 0:
            n =n // i
        i += 1
    return int(ans )
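# Worked example (Project Euler #3): 600851475143 = 71 * 839 * 1471 * 6857, so the
# call below prints 6857 as the largest prime factor.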
if __name__ == "__main__":
print(F'''{solution() = }''')
| 689 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCamelCase_ ( ) ->str:
A =ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=a_ )
A =parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=a_ )
env_command_parser(subparsers=a_ )
launch_command_parser(subparsers=a_ )
tpu_command_parser(subparsers=a_ )
test_command_parser(subparsers=a_ )
# Let's go
A =parser.parse_args()
if not hasattr(a_ , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(a_ )
if __name__ == "__main__":
main()
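# Typical invocations of this entry point from a shell:
#   accelerate config
#   accelerate launch train.py --num_processes 2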
| 713 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "Wav2Vec2FeatureExtractor"
_A = "AutoTokenizer"
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
A =self.feature_extractor
A =False
@classmethod
def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ):
"""simple docstring"""
try:
return super().from_pretrained(snake_case__ , **snake_case__ )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , snake_case__ , )
A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ )
A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ )
return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ )
def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*snake_case__ , **snake_case__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
A =kwargs.pop("raw_speech" )
else:
A =kwargs.pop("audio" , snake_case__ )
A =kwargs.pop("sampling_rate" , snake_case__ )
A =kwargs.pop("text" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
if text is not None:
A =self.tokenizer(snake_case__ , **snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A =encodings["input_ids"]
return inputs
def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case__ , **snake_case__ )
A =kwargs.pop("input_features" , snake_case__ )
A =kwargs.pop("labels" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if input_features is not None:
A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
if labels is not None:
A =self.tokenizer.pad(snake_case__ , **snake_case__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A =labels["input_ids"]
return input_features
def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@contextmanager
def _a ( self : int ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
A =True
A =self.tokenizer
yield
A =self.feature_extractor
A =False
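# Hypothetical usage sketch (un-obfuscated names assumed; "facebook/wav2vec2-base-960h"
# is a real checkpoint and `waveform` a 1-D float array sampled at 16 kHz):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD").input_ids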
| 689 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def UpperCamelCase_ ( base_model_path , checkpoint_path , lora_prefix_unet , lora_prefix_text_encoder , alpha ) ->str:
    # load base model
    pipeline =StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict =load_file(checkpoint_path )
    visited =[]
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key; it will usually look something like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # the alpha values were applied beforehand, so just skip those keys
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos =key.split("." )[0].split(lora_prefix_text_encoder + "_" )[-1].split("_" )
            curr_layer =pipeline.text_encoder
        else:
            layer_infos =key.split("." )[0].split(lora_prefix_unet + "_" )[-1].split("_" )
            curr_layer =pipeline.unet
        # find the target layer
        temp_name =layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer =curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name =layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name =layer_infos.pop(0 )
        pair_keys =[]
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down" , "lora_up" ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace("lora_up" , "lora_down" ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up =state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down =state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up =state_dict[pair_keys[0]].to(torch.float32 )
            weight_down =state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
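# The in-place update above is the standard LoRA merge W <- W + alpha * (up @ down),
# with the low-rank factors read in pairs from the .safetensors state dict.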
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
__a = parser.parse_args()
__a = args.base_model_path
__a = args.checkpoint_path
__a = args.dump_path
__a = args.lora_prefix_unet
__a = args.lora_prefix_text_encoder
__a = args.alpha
__a = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__a = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 714 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 689 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCamelCase__( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Union[str, Any] , snake_case__ : int = 7_68 , ):
"""simple docstring"""
super().__init__()
A =nn.Parameter(torch.zeros(1 , snake_case__ ) )
A =nn.Parameter(torch.ones(1 , snake_case__ ) )
def _a ( self : List[Any] , snake_case__ : Optional[Union[str, torch.device]] = None , snake_case__ : Optional[torch.dtype] = None , ):
"""simple docstring"""
A =nn.Parameter(self.mean.to(snake_case__ ).to(snake_case__ ) )
A =nn.Parameter(self.std.to(snake_case__ ).to(snake_case__ ) )
return self
def _a ( self : Dict , snake_case__ : Union[str, Any] ):
"""simple docstring"""
A =(embeds - self.mean) * 1.0 / self.std
return embeds
def _a ( self : Dict , snake_case__ : str ):
"""simple docstring"""
A =(embeds * self.std) + self.mean
return embeds
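# The two methods above standardize embeddings as (x - mean) / std and invert the
# mapping as x * std + mean, so chaining them round-trips an embedding exactly.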
| 715 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__a = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__a = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
__a = """|""".join(sys.argv[1:])
__a = re.compile(rF'''^({joined_dirs}).*?\.py$''')
__a = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 689 | 0 |
__a = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__a = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__a = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
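# _LazyModule keeps `import transformers.models.mobilevit` cheap: the torch/TF-backed
# classes listed above are only imported on first attribute access.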
| 689 | 0 |
'''simple docstring'''
def UpperCamelCase_ ( edge ) ->float:
    if edge <= 0 or not isinstance(edge , int ):
        raise ValueError("Length must be a positive number." )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def UpperCamelCase_ ( edge ) ->float:
    if edge <= 0 or not isinstance(edge , int ):
        raise ValueError("Length must be a positive number." )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
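# Worked example for edge = 1: surface area = 3 * sqrt(25 + 10 * sqrt(5)) ≈ 20.6458
# and volume = (15 + 7 * sqrt(5)) / 4 ≈ 7.6631.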
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
def UpperCamelCase_ ( input_a , input_b ) ->int:
    return int((input_a, input_b).count(0 ) != 0 )
def UpperCamelCase_ ( ) ->None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 689 | 0 |
def UpperCamelCase_ ( collection ) ->Optional[int]:
    length =len(collection )
    for i in range(length - 1 ):
        least =i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least =k
        if least != i:
            collection[least] , collection[i] =(collection[i], collection[least])
    return collection
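# Selection sort scans the unsorted suffix for its minimum on every pass, so it
# always performs O(n^2) comparisons but at most n - 1 swaps.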
if __name__ == "__main__":
__a = input("""Enter numbers separated by a comma:\n""").strip()
__a = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
| 718 |
def UpperCamelCase_ ( n , array , target ) ->int:
    def count_of_possible_combinations(target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def UpperCamelCase_ ( n , array , target ) ->int:
    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer =sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] =answer
        return answer
    dp_array =[-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def UpperCamelCase_ ( n , array , target ) ->int:
    dp_array =[0] * (target + 1)
    dp_array[0] =1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
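# Worked example: for array = [1, 2, 5] and target = 5 the bottom-up table is
# dp_array = [1, 1, 2, 3, 5, 9], i.e. there are 9 ordered sequences that sum to 5.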
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = 3
__a = 5
__a = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 689 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def UpperCamelCase_ ( ) ->Dict:
A =ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
A =parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(a_ )
DownloadCommand.register_subcommand(a_ )
EnvironmentCommand.register_subcommand(a_ )
RunCommand.register_subcommand(a_ )
ServeCommand.register_subcommand(a_ )
UserCommands.register_subcommand(a_ )
AddNewModelCommand.register_subcommand(a_ )
AddNewModelLikeCommand.register_subcommand(a_ )
LfsCommands.register_subcommand(a_ )
PTtoTFCommand.register_subcommand(a_ )
# Let's go
A =parser.parse_args()
if not hasattr(a_ , "func" ):
parser.print_help()
exit(1 )
# Run
A =args.func(a_ )
service.run()
if __name__ == "__main__":
main()
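# Typical invocations of this entry point from a shell:
#   transformers-cli env
#   transformers-cli download bert-base-uncased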
| 719 |
from __future__ import annotations
import math
def UpperCamelCase_ ( u , p ) ->float:
    temp =u
    for i in range(1 , p ):
        temp =temp * (u - i)
    return temp
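# ucal(u, p) returns the falling product u * (u - 1) * ... * (u - p + 1), the
# coefficient needed by Newton's forward-difference formula:
#   y(x0 + u*h) = sum_i ucal(u, i) * delta^i y0 / i!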
def UpperCamelCase_ ( ) ->None:
A =int(input("enter the numbers of values: " ) )
A =[]
for _ in range(a_ ):
y.append([] )
for i in range(a_ ):
for j in range(a_ ):
y[i].append(a_ )
A =0
print("enter the values of parameters in a list: " )
A =list(map(a_ , input().split() ) )
print("enter the values of corresponding parameters: " )
for i in range(a_ ):
A =float(input() )
A =int(input("enter the value to interpolate: " ) )
A =(value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , a_ ):
for j in range(n - i ):
A =y[j + 1][i - 1] - y[j][i - 1]
A =y[0][0]
for i in range(1 , a_ ):
summ += (ucal(a_ , a_ ) * y[0][i]) / math.factorial(a_ )
print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
| 689 | 0 |
from collections import deque
def UpperCamelCase_ ( g ) ->List[str]:
    n =len(g )
    stack =deque()
    on_stack =[False for _ in range(n )]
    index_of =[-1 for _ in range(n )]
    lowlink_of =index_of[:]
    def strong_connect(v , index , components ):
        index_of[v] =index  # the number when this node is seen
        lowlink_of[v] =index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] =True
        for w in g[v]:
            if index_of[w] == -1:
                index =strong_connect(w , index , components )
                lowlink_of[v] =(
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] =(
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component =[]
            w =stack.pop()
            on_stack[w] =False
            component.append(w )
            while w != v:
                w =stack.pop()
                on_stack[w] =False
                component.append(w )
            components.append(component )
        return index
    components =[]
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
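# Tarjan's algorithm runs in O(V + E) and emits strongly connected components in
# reverse topological order; the test graph below yields [[5], [6], [4], [3, 2, 1, 0]].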
def UpperCamelCase_ ( n_vertices , edges ) ->Dict:
    g =[[] for _ in range(n_vertices )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
# Test
__a = 7
__a = [0, 0, 1, 2, 3, 3, 4, 4, 6]
__a = [1, 3, 2, 0, 1, 4, 5, 6, 5]
__a = [(u, v) for u, v in zip(source, target)]
__a = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 720 |
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCamelCase_ ( img ) ->Any:
    # getting the image dimensions (number of pixel rows and columns)
    height , width =img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height ):
        for j in range(width ):
            img[i][j] =[255, 255, 255] - img[i][j]
    return img
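# Every channel value c is mapped to 255 - c, so black (0, 0, 0) becomes
# white (255, 255, 255).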
if __name__ == "__main__":
# read original image
__a = imread("""image_data/lena.jpg""", 1)
# convert to its negative
__a = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 689 | 0 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__a = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
__a = F'''https://www.google.com/search?q={query}&num=100'''
__a = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
__a = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
__a = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
| 721 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
__a = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
__a = {
"""ctrl""": 2_5_6,
}
__a = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def UpperCamelCase_ ( a_ ) ->List[str]:
A =set()
A =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A =char
A =set(a_ )
return pairs
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = CONTROL_CODES
def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]="<unk>" , **snake_case__ : List[str] ):
"""simple docstring"""
super().__init__(unk_token=snake_case__ , **snake_case__ )
with open(snake_case__ , encoding="utf-8" ) as vocab_handle:
A =json.load(snake_case__ )
A ={v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
A =merges_handle.read().split("\n" )[1:-1]
A =[tuple(merge.split() ) for merge in merges]
A =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
A ={}
@property
def _a ( self : str ):
"""simple docstring"""
return len(self.encoder )
def _a ( self : List[Any] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : int , snake_case__ : Any ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
A =tuple(snake_case__ )
A =tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
A =get_pairs(snake_case__ )
if not pairs:
return token
while True:
A =min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
A , A =bigram
A =[]
A =0
while i < len(snake_case__ ):
try:
A =word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A =j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A =tuple(snake_case__ )
A =new_word
if len(snake_case__ ) == 1:
break
else:
A =get_pairs(snake_case__ )
A ="@@ ".join(snake_case__ )
A =word[:-4]
A =word
return word
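    # BPE sketch: a token such as "ctrl" starts as ('c', 't', 'r', 'l</w>'); the
    # lowest-ranked adjacent pair is merged repeatedly, and non-final subwords carry
    # an "@@ " continuation marker that string conversion later strips.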
def _a ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
A =[]
A =re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def _a ( self : List[str] , snake_case__ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def _a ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
return self.decoder.get(snake_case__ , self.unk_token )
def _a ( self : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
A =" ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def _a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A =os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(snake_case__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + "\n" )
A =0
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
A =token_index
writer.write(" ".join(snake_case__ ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 689 | 0 |
def UpperCamelCase_ ( input_a , input_b ) ->int:
    return int((input_a, input_b).count(0 ) != 0 )
def UpperCamelCase_ ( ) ->None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 700 |
def UpperCamelCase_ ( denominations , value ) ->list[int]:
    total_value =int(value )
    # Initialize Result
    answer =[]
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
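# Worked example of the greedy strategy:
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]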
# Driver Code
if __name__ == "__main__":
__a = []
__a = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__a = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
__a = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__a = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__a = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'''Following is minimal change for {value}: ''')
__a = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 689 | 0 |