# ---- UperNet model configuration (Transformers-style module) ----
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
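
# Minimal usage sketch for the config above, assuming it is exposed as
# `transformers.UperNetConfig` (true for recent Transformers releases):
#
#     from transformers import UperNetConfig
#     config = UperNetConfig()                    # falls back to the ResNet backbone
#     assert config.backbone_config.model_type == "resnet"
#     assert config.to_dict()["model_type"] == "upernet"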

# ---- Disjoint set (union-find) with union by rank and set-size tracking ----
class DisjointSet:
    def __init__(self, set_counts):
        """Initialize from a list of initial set sizes; `max_set` tracks the largest set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Merge the sets containing src and dst; returns False if already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Find the root of disj_set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
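
# Self-contained demo of the class above: three singleton sets, merged down to one.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1])
    assert ds.merge(0, 1) is True   # {0} and {1} become one set of size 2
    assert ds.merge(0, 1) is False  # already in the same set
    ds.merge(1, 2)
    assert ds.max_set == 3          # all three elements now share one root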

# ---- Lazy import structure for the GIT model package ----
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
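
# What the lazy pattern above buys (sketch): importing a name from this package
# only materializes the submodule that defines it, e.g.
#
#     from transformers.models.git import GitConfig   # loads configuration_git only
#
# so the torch-dependent modeling file stays untouched until a modeling class is used.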

# ---- Composite trapezoidal rule for numerical integration ----
def trapezoidal_rule(boundary, steps):
    """Approximate the integral of f over `boundary` = [a, b] with `steps` trapezoids."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ..., b - h.

    Index-based iteration avoids the floating-point drift that made the original
    `while x < (b - h)` loop drop the last interior point.
    """
    n = round((b - a) / h)
    for i in range(1, n):
        yield a + i * h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
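
# Sanity check for the rule above: the exact integral of x^2 over [0, 1] is 1/3,
# and 10 trapezoids land within about 2e-3 of it.
#
#     >>> abs(trapezoidal_rule([0.0, 1.0], 10.0) - 1 / 3) < 2e-3
#     True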

# ---- Electric conductivity solver (sigma = n * e * mu) ----
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Given any two of conductivity, electron concentration, and mobility
    (with the unknown passed as 0), solve sigma = n * e * mu for the third."""
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
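
# Example call for the solver above: pass 0 for the unknown quantity.
#
#     >>> electric_conductivity(conductivity=5.0, electron_conc=1e20, mobility=0)
#     ('mobility', 0.3120...)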

# ---- Lazy import structure for the ViT model package ----
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

# ---- Giphy search API client ----
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of gif URLs matching the query from the Giphy search API."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))

# ---- XLM-RoBERTa (SentencePiece) tokenizer ----
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
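
# Sketch of the id alignment the class above implements (piece ids shown assume
# the real XLM-R sentencepiece model file is loaded):
#
#     tok = XLMRobertaTokenizer("sentencepiece.bpe.model")
#     tok.convert_tokens_to_ids("<pad>")  # -> 1, from the fairseq table, not spm
#     tok.convert_tokens_to_ids("▁de")    # -> sp_model.PieceToId("▁de") + fairseq_offset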

# ---- Postfix (reverse Polish) expression evaluator ----
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix expression over integers with +, -, * and a division
    that truncates toward zero (like C-style integer division)."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)  # adjust floor division toward zero
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
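
# Worked examples for the evaluator above: "2 1 + 3 *" means (2 + 1) * 3.
#
#     >>> evaluate_postfix(["2", "1", "+", "3", "*"])
#     9
#     >>> evaluate_postfix(["-7", "2", "/"])  # division truncates toward zero
#     -3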

# ---- Tests for generation stopping criteria ----
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor

if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
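
# How these criteria are used outside the test (sketch; `model` and `input_ids`
# are assumed to already exist):
#
#     criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)])
#     model.generate(input_ids, stopping_criteria=criteria)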

# ---- Environment report script ----
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)

# ---- CodeGen fast tokenizer ----
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from ...utils import is_tf_available, is_torch_available, logging

if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005, "
                "so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
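
# Typical use of the `truncate_before_pattern` hook above, with the patterns the
# CodeGen documentation suggests (model download assumed to succeed):
#
#     tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#     text = tokenizer.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])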

# ---- Test utilities for the datasets library (pytest/unittest helpers) ----
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config

if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
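
# Sketch of how these helpers compose in a test module (decorator and context
# manager names as defined above):
#
#     @slow
#     @require_faiss
#     def test_build_index():
#         ...
#
#     def test_graceful_offline_failure():
#         with offline(OfflineSimulationMode.CONNECTION_FAILS):
#             ...  # code under test should surface requests.ConnectionError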

# ---- BART byte-level BPE tokenizer ----
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings used by byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
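
# Small self-contained checks of the byte-level BPE plumbing above:
#
#     >>> sorted(get_pairs(tuple("low")))
#     [('l', 'o'), ('o', 'w')]
#     >>> bytes_to_unicode()[ord(" ")]  # space maps to a printable stand-in
#     'Ġ'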
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __A :
snake_case :int = BlenderbotConfig
snake_case :int = {}
snake_case :Dict = "gelu"
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=20 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=0 , ):
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Tuple = seq_length
__UpperCAmelCase : str = is_training
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : List[str] = num_hidden_layers
__UpperCAmelCase : Tuple = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : List[Any] = eos_token_id
__UpperCAmelCase : Union[str, Any] = pad_token_id
__UpperCAmelCase : Union[str, Any] = bos_token_id
def _snake_case ( self ):
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCAmelCase : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCAmelCase : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCAmelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : str = TFBlenderbotModel(config=UpperCamelCase_ ).get_decoder()
__UpperCAmelCase : Union[str, Any] = inputs_dict["input_ids"]
__UpperCAmelCase : Dict = input_ids[:1, :]
__UpperCAmelCase : int = inputs_dict["attention_mask"][:1, :]
__UpperCAmelCase : Union[str, Any] = inputs_dict["head_mask"]
__UpperCAmelCase : Union[str, Any] = 1
# first forward pass
__UpperCAmelCase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
__UpperCAmelCase , __UpperCAmelCase : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCAmelCase : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__UpperCAmelCase : str = tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCAmelCase : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCAmelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx]
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-3 )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }

@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 10 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 10 | 1 |
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that giving ``color`` to the current vertex does not clash with
    any already-colored neighbour."""
    # Does any neighbour not satisfy the constraints?
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(
    graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int
) -> bool:
    """Recursively try to color vertex ``index`` onwards, backtracking on failure."""
    # Base case: every vertex has been colored
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring of ``graph`` (given as an adjacency matrix)
    using at most ``max_colors`` colors, or an empty list if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
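

# Illustrative usage (not part of the original module; graph and expected
# results below are example values): a triangle needs three colors because
# every vertex is adjacent to the other two.
if __name__ == "__main__":
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    assert color(triangle, 2) == []  # a triangle is not 2-colorable
    coloring = color(triangle, 3)
    assert len(set(coloring)) == 3  # all three vertices get distinct colors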
| 10 | '''simple docstring'''
def solution(n: int = 100) -> int:
    """Project Euler problem 6: return the difference between the square of
    the sum of the first ``n`` natural numbers and the sum of their squares."""
    sum_cubes = (n * (n + 1) // 2) ** 2  # square of the sum (also the sum of cubes)
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
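    # Worked example (quick sanity check): for n = 10 the square of the sum is
    # 55 ** 2 = 3025 and the sum of squares is 385, so the difference is 2640.
    assert solution(10) == 2640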
| 10 | 1 |
'''simple docstring'''
_a : List[str] = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 10 | '''simple docstring'''
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Discount each cash flow back to today at ``discount_rate`` and return
    the net present value, rounded to two decimal places."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
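    # Illustrative usage (figures chosen here, not taken from the original
    # module): a 13% discount rate applied to a short cash-flow series.
    example_npv = present_value(0.13, [10.0, 20.70, -293.0, 297.0])
    print(f"Example NPV at 13%: {example_npv}")  # ~4.69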
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Generate a random array of ten integers and a random target value."""
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every 3-element permutation until one sums to target."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer approach on the sorted array, O(n^2)."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    """Time both implementations on the shared random dataset."""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 10 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Return the shape of ``tensor``, preferring static dimensions and
    falling back to dynamic ones where the static shape is unknown."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Numerically safer softmax: the tiny additive constant avoids NaNs on
    fully masked rows."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # A simplified functional layernorm, mirroring torch.nn.functional.layer_norm
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Turn a 2D/3D attention mask into an additive 4D mask with large
    negative values at the masked positions."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    """Raise if any index in ``tensor`` is out of range for an embedding layer
    of input dimension ``embed_dim``."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    """Save attributes to an HDF5 group, chunking them if they exceed the
    HDF5 object-header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Load attributes from an HDF5 group, reassembling chunked values."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expand any rank-1 tensors in a nested structure to rank 2."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
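

# Illustrative usage sketch (not part of the original module; note the relative
# import above means this file normally runs inside its package rather than as
# a script). Shapes in the comments are what the calls produce.
if __name__ == "__main__":
    x = tf.zeros((2, 3, 4))
    print(shape_list(x))                 # [2, 3, 4] (static dims where available)
    print(flatten(x, 1).shape)           # (2, 12): dims 1..-1 collapsed
    print(expand_1d(tf.zeros(5)).shape)  # (5, 1): rank-1 expanded to rank 2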
| 10 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
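

# Illustrative usage sketch (not part of the original file; the relative
# imports above mean this module is normally used inside its package). The
# per-stage lists describe the default three-stage layout; overriding `depth`
# is an example of sketching a deeper variant.
if __name__ == "__main__":
    config = CvtConfig()
    assert config.depth == [1, 2, 10]  # stages of the default architecture
    deeper = CvtConfig(depth=[1, 4, 16])  # example values only
    print(deeper.depth)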
| 10 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | '''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve ``Ax = b`` iteratively with the Jacobi method, starting from
    ``init_val`` and running for a fixed number of ``iterations``."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise unless each diagonal entry exceeds the sum of the other entries
    in its row (the convergence condition this module checks)."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
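    # Illustrative example (values chosen here, not from the original module):
    # solve the strictly diagonally dominant system 4x + y = 2, x + 3y = -2,
    # whose exact solution is x = 8/11 and y = -10/11; a few Jacobi sweeps get
    # close to it.
    coefficients = np.array([[4.0, 1.0], [1.0, 3.0]])
    constants = np.array([[2.0], [-2.0]])
    approx = jacobi_iteration_method(coefficients, constants, [0, 0], 25)
    print(approx)  # close to [0.7272..., -0.9090...]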
| 10 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        """Return the next item from the currently unrolled batch."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take the correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take the correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self
    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 10 | '''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Return the Möbius function of ``n``: 0 if ``n`` has a squared prime
    factor, otherwise (-1) raised to the number of distinct prime factors."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
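    # Worked examples (illustrative): mobius(4) == 0 since 4 = 2 ** 2 is not
    # square-free; mobius(6) == 1 since 6 = 2 * 3 has an even number of prime
    # factors; mobius(30) == -1 since 30 = 2 * 3 * 5 has an odd number.
    assert (mobius(4), mobius(6), mobius(30)) == (0, 1, -1)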
| 10 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A (__magic_name__ , unittest.TestCase ):
snake_case :List[Any] = LEDTokenizer
snake_case :List[str] = LEDTokenizerFast
snake_case :Tuple = True
def _snake_case ( self ):
super().setUp()
__UpperCAmelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
__UpperCAmelCase : Any = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__UpperCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
__UpperCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCamelCase_ ) )
def _snake_case ( self , **UpperCamelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def _snake_case ( self , **UpperCamelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
return "lower newer", "lower newer"
@cached_property
def _snake_case ( self ):
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def _snake_case ( self ):
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__UpperCAmelCase : Tuple = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Tuple = tokenizer(UpperCamelCase_ , max_length=len(UpperCamelCase_ ) , padding=UpperCamelCase_ , return_tensors="pt" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__UpperCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@require_torch
def _snake_case ( self ):
__UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Tuple = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" )
self.assertIn("input_ids" , UpperCamelCase_ )
self.assertIn("attention_mask" , UpperCamelCase_ )
self.assertNotIn("labels" , UpperCamelCase_ )
self.assertNotIn("decoder_attention_mask" , UpperCamelCase_ )
@require_torch
def _snake_case ( self ):
__UpperCAmelCase : int = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : List[str] = tokenizer(text_target=UpperCamelCase_ , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def _snake_case ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Tuple = tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="pt" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def _snake_case ( self ):
__UpperCAmelCase : List[str] = ["A long paragraph for summarization."]
__UpperCAmelCase : Any = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase_ , return_tensors="pt" )
__UpperCAmelCase : str = tokenizer(text_target=UpperCamelCase_ , return_tensors="pt" )
__UpperCAmelCase : str = inputs["input_ids"]
__UpperCAmelCase : Any = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _snake_case ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCAmelCase : Dict = ["Summary of the text.", "Another summary."]
__UpperCAmelCase : str = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__UpperCAmelCase : int = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ )
__UpperCAmelCase : Tuple = [[0] * len(UpperCamelCase_ ) for x in encoded_output["input_ids"]]
__UpperCAmelCase : int = tokenizer.pad(UpperCamelCase_ )
self.assertSequenceEqual(outputs["global_attention_mask"] , UpperCamelCase_ )
def _snake_case ( self ):
pass
def _snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : Tuple = "A, <mask> AllenNLP sentence."
__UpperCAmelCase : str = tokenizer_r.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
__UpperCAmelCase : List[str] = tokenizer_p.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
__UpperCAmelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
__UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
UpperCamelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate an Erdos-Renyi style random graph: each possible edge is
    included independently with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)

                if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
                    graph[j].append(i)

    return graph


def complete_graph(vertices_number: int) -> dict:
    """Return a complete graph on ``vertices_number`` vertices as an adjacency dict."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
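    # Illustrative usage (example values): the two probability extremes are
    # deterministic, so they can be checked directly.
    assert random_graph(4, 1) == complete_graph(4)
    assert random_graph(4, 0) == {0: [], 1: [], 2: [], 3: []}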
| 10 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 10 | 1 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __A (__magic_name__ ):
def __get__( self , UpperCamelCase_ , UpperCamelCase_=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
__UpperCAmelCase : Any = "__cached_" + self.fget.__name__
__UpperCAmelCase : int = getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if cached is None:
__UpperCAmelCase : Union[str, Any] = self.fget(UpperCamelCase_ )
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return cached
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
if is_torch_fx_proxy(lowerCamelCase__ ):
return True
if is_torch_available():
import torch
if isinstance(lowerCamelCase__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCamelCase__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCamelCase__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCamelCase__ , np.ndarray )
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return isinstance(lowerCamelCase__ , np.ndarray )
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
return _is_numpy(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
import torch
return isinstance(lowerCamelCase__ , torch.Tensor )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return False if not is_torch_available() else _is_torch(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
import torch
return isinstance(lowerCamelCase__ , torch.device )
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
import torch
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
if hasattr(lowerCamelCase__ , lowerCamelCase__ ):
__UpperCAmelCase : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ )
else:
return False
return isinstance(lowerCamelCase__ , torch.dtype )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
import tensorflow as tf
return isinstance(lowerCamelCase__ , tf.Tensor )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Dict:
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCamelCase__ , "is_symbolic_tensor" ):
return tf.is_symbolic_tensor(lowerCamelCase__ )
return type(lowerCamelCase__ ) == tf.Tensor
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCamelCase__ , jnp.ndarray )
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return False if not is_flax_available() else _is_jax(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
if isinstance(lowerCamelCase__ , (dict, UserDict) ):
return {k: to_py_obj(lowerCamelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCamelCase__ , (list, tuple) ):
return [to_py_obj(lowerCamelCase__ ) for o in obj]
elif is_tf_tensor(lowerCamelCase__ ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCamelCase__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCamelCase__ ):
return np.asarray(lowerCamelCase__ ).tolist()
elif isinstance(lowerCamelCase__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
if isinstance(lowerCamelCase__ , (dict, UserDict) ):
return {k: to_numpy(lowerCamelCase__ ) for k, v in obj.items()}
elif isinstance(lowerCamelCase__ , (list, tuple) ):
return np.array(lowerCamelCase__ )
elif is_tf_tensor(lowerCamelCase__ ):
return obj.numpy()
elif is_torch_tensor(lowerCamelCase__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCamelCase__ ):
return np.asarray(lowerCamelCase__ )
else:
return obj
class __A (__magic_name__ ):
    def __post_init__( self ):
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
    def setdefault( self , *args , **kwargs ):
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
    def pop( self , *args , **kwargs ):
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
    def update( self , *args , **kwargs ):
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
    def __getitem__( self , k ):
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , name , value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self , key , value ):
        # Will raise a KeyException if needed
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self ):
        return tuple(self[k] for k in self.keys() )
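# This container behaves like a ModelOutput-style ordered mapping: fields are
# reachable both as attributes and as string keys, in-place mutation through
# pop/update/setdefault is deliberately blocked, and integer indexing falls
# back to to_tuple(). A minimal sketch of the expected behavior (illustrative
# only; assumes the usual dataclass subclassing pattern):
#
# >>> @dataclass
# ... class SampleOutput(__A):
# ...     logits: Optional[Any] = None
# ...     hidden_states: Optional[Any] = None
# >>> out = SampleOutput(logits=np.ones(2))
# >>> out["logits"] is out.logits
# True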
class __A (__magic_name__ , __magic_name__ ):
    @classmethod
    def _missing_( cls , value ):
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class __A (__magic_name__ ):
snake_case :Optional[Any] = "longest"
snake_case :str = "max_length"
snake_case :Optional[Any] = "do_not_pad"
class __A (__magic_name__ ):
snake_case :int = "pt"
snake_case :Union[str, Any] = "tf"
snake_case :int = "np"
snake_case :str = "jax"
class __A :
    def __init__( self , context_managers ):
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self , *args , **kwargs ):
        self.stack.__exit__(*args , **kwargs )
def can_return_loss( model_class ) -> bool:
    """simple docstring"""
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels( model_class ) -> list:
    """simple docstring"""
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict( d , parent_key = "" , delimiter = "." ) -> dict:
    """simple docstring"""
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , (dict, UserDict) ):
                yield from _flatten_dict(v , key , delimiter=delimiter )
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
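# Usage sketch (illustrative): nested mappings are joined with the delimiter,
# one segment per level.
#
# >>> flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
# {'a': 1, 'b.c': 2, 'b.d.e': 3}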
@contextmanager
def working_or_temp_dir( working_dir , use_temp_dir = False ) -> Dict:
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def transpose( array , axes=None ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f"""Type not supported for transpose: {type(array )}.""" )
def reshape( array , newshape ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f"""Type not supported for reshape: {type(array )}.""" )
def squeeze( array , axis=None ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f"""Type not supported for squeeze: {type(array )}.""" )
def expand_dims( array , axis ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f"""Type not supported for expand_dims: {type(array )}.""" )
def tensor_size( array ):
    """simple docstring"""
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f"""Type not supported for tensor_size: {type(array )}.""" )
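# Usage sketch (illustrative): each helper above dispatches on the tensor's
# framework, so callers can stay framework-agnostic. With NumPy inputs:
#
# >>> x = np.zeros((2, 3))
# >>> transpose(x).shape
# (3, 2)
# >>> reshape(x, (6,)).shape
# (6,)
# >>> expand_dims(x, 0).shape
# (1, 2, 3)
# >>> tensor_size(x)
# 6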
def add_model_info_to_auto_map( auto_map , repo_id ) -> List[Any]:
    """simple docstring"""
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"""{repo_id}--{value}"""
    return auto_map
def infer_framework( model_class ) -> str:
    """simple docstring"""
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"""Could not infer framework from class {model_class}.""" )
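# Note: because this walks the MRO, a subclass of e.g. TFPreTrainedModel is
# detected as "tf" even if its own module lives outside tensorflow/keras;
# only a class matching none of the three families raises TypeError.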
| 10 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        for j in range(i * i, 1000001, i):
            seive[j] = False
    i += 1
def is_prime( n ) -> bool:
    """simple docstring"""
    return seive[n]
def contains_an_even_digit( n ) -> bool:
    """simple docstring"""
    return any(digit in "02468" for digit in str(n ) )
def find_circular_primes( limit = 100_0000 ) -> list[int]:
    """simple docstring"""
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            str_num = str(num )
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
    return result
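# Note: a circular prime stays prime under every rotation of its digits
# (197 -> 971 -> 719), which is exactly what the rotation list above checks.
# Skipping candidates with an even digit is safe: some rotation would end in
# that digit and therefore be divisible by 2.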
def solution() -> int:
    """simple docstring"""
    return len(find_circular_primes() )
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
| 10 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list ) -> bool:
    """simple docstring"""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline
snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case :Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case :Any = frozenset([] )
snake_case :Optional[int] = True
@property
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[str] = (16, 16)
__UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
def _snake_case ( self ):
torch.manual_seed(0 )
        __UpperCAmelCase : List[str] = UNet2DConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
__UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" )
__UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , )
__UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ )
__UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith("mps" ):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__UpperCAmelCase : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
__UpperCAmelCase : List[str] = "cpu"
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
__UpperCAmelCase : Tuple = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
__UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def _snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _snake_case ( self ):
__UpperCAmelCase : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : List[str] = 2
__UpperCAmelCase : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , scheduler_enum.name )
__UpperCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
__UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ )[0]
outputs.append(UpperCamelCase_ )
assert check_same_shape(UpperCamelCase_ )
@require_torch_gpu
@slow
class __A (unittest.TestCase ):
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = torch.manual_seed(33 )
        __UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.float16 )
pipe.to("cuda" )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
__UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images
__UpperCAmelCase : int = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = torch.manual_seed(33 )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
__UpperCAmelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
__UpperCAmelCase : Dict = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 10 | 1 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_a : Tuple = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
    url = f"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 10 | '''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ):
super().__init__(features=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and column:
if all(
isinstance(UpperCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , (str, bytes, type(UpperCamelCase_ )) ):
return value
elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__UpperCAmelCase : int = {}
if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__UpperCAmelCase : Optional[int] = {"dtype": torch.intaa}
elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__UpperCAmelCase : str = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
__UpperCAmelCase : str = np.asarray(UpperCamelCase_ )
return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _snake_case ( self , UpperCamelCase_ ):
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCamelCase_ , "__array__" ) and not isinstance(UpperCamelCase_ , torch.Tensor ):
__UpperCAmelCase : Dict = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase_ , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ )
return self.recursive_tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] )
__UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ )
__UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ )
__UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ )
for column_name in batch:
__UpperCAmelCase : Tuple = self._consolidate(batch[column_name] )
return batch
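# Usage sketch (illustrative): this formatter backs "torch"-formatted dataset
# access, e.g. `dataset.with_format("torch")`. Rows, columns and batches read
# from the underlying Arrow table come back as torch.Tensors, with integer
# features defaulting to torch.int64 and floating-point ones to torch.float32.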
| 10 | 1 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_a : str = logging.get_logger(__name__)
def get_resize_output_image_size( input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    """simple docstring"""
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height, input_width = get_image_size(input_image )
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
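# Usage sketch (illustrative): with keep_aspect_ratio=True the rescale factor
# closer to 1 wins, and both output sides are snapped to the given multiple.
# Assuming get_image_size reads (height, width) from the channels-first array
# below, a 480x640 image with a 384x384 target and multiple=32 maps to:
#
# >>> import numpy as np
# >>> get_resize_output_image_size(np.zeros((3, 480, 640)), (384, 384), True, 32)
# (384, 512)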
class __A (__magic_name__ ):
snake_case :int = ["pixel_values"]
def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = False , UpperCamelCase_ = 1 , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = size if size is not None else {"height": 3_84, "width": 3_84}
__UpperCAmelCase : int = get_size_dict(UpperCamelCase_ )
__UpperCAmelCase : str = do_resize
__UpperCAmelCase : List[str] = size
__UpperCAmelCase : Dict = keep_aspect_ratio
__UpperCAmelCase : Optional[Any] = ensure_multiple_of
__UpperCAmelCase : List[Any] = resample
__UpperCAmelCase : str = do_rescale
__UpperCAmelCase : str = rescale_factor
__UpperCAmelCase : Tuple = do_normalize
__UpperCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCAmelCase : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = 1 , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : List[Any] = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__UpperCAmelCase : Optional[Any] = get_resize_output_image_size(
UpperCamelCase_ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=UpperCamelCase_ , multiple=UpperCamelCase_ , )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ):
__UpperCAmelCase : List[str] = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : List[str] = size if size is not None else self.size
__UpperCAmelCase : Any = get_size_dict(UpperCamelCase_ )
__UpperCAmelCase : List[str] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__UpperCAmelCase : Dict = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__UpperCAmelCase : Any = resample if resample is not None else self.resample
__UpperCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase : int = image_std if image_std is not None else self.image_std
__UpperCAmelCase : int = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__UpperCAmelCase : Optional[int] = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
__UpperCAmelCase : str = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_rescale:
__UpperCAmelCase : Tuple = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
__UpperCAmelCase : List[str] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
__UpperCAmelCase : Dict = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__UpperCAmelCase : List[Any] = {"pixel_values": images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : List[str] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(UpperCamelCase_ ):
__UpperCAmelCase : List[str] = target_sizes.numpy()
__UpperCAmelCase : Optional[int] = []
for idx in range(len(UpperCamelCase_ ) ):
__UpperCAmelCase : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCamelCase_ )
__UpperCAmelCase : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase_ )
else:
__UpperCAmelCase : int = logits.argmax(dim=1 )
__UpperCAmelCase : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 10 | '''simple docstring'''
def valid_coloring( neighbours , colored_vertices , color ) -> bool:
    """simple docstring"""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph , max_colors , colored_vertices , index ) -> bool:
    """simple docstring"""
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph , max_colors ) -> list[int]:
    """simple docstring"""
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
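# Usage sketch (illustrative): `graph` is an adjacency matrix; color() returns
# one valid assignment using at most max_colors colors, or [] if none exists.
#
# >>> small_graph = [
# ...     [0, 1, 0, 0, 0],
# ...     [1, 0, 1, 0, 1],
# ...     [0, 1, 0, 1, 0],
# ...     [0, 0, 1, 0, 0],
# ...     [0, 1, 0, 0, 0],
# ... ]
# >>> color(small_graph, 2)
# [0, 1, 0, 1, 0]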
| 10 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Tuple = "▁"
_a : Dict = {"vocab_file": "sentencepiece.bpe.model"}
_a : str = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
_a : Tuple = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_a : str = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __A (__magic_name__ ):
snake_case :Optional[Any] = VOCAB_FILES_NAMES
snake_case :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Dict = ["input_ids", "attention_mask"]
snake_case :List[int] = []
snake_case :List[int] = []
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = None , UpperCamelCase_=None , UpperCamelCase_=False , **UpperCamelCase_ , ):
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__UpperCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
__UpperCAmelCase : Any = legacy_behaviour
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__UpperCAmelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCAmelCase : List[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCAmelCase : str = 1
__UpperCAmelCase : Union[str, Any] = len(self.sp_model )
__UpperCAmelCase : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase_ )
}
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()}
__UpperCAmelCase : Optional[int] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__UpperCAmelCase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__UpperCAmelCase : Any = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__UpperCAmelCase : Optional[Any] = src_lang if src_lang is not None else "eng_Latn"
__UpperCAmelCase : Optional[int] = self.lang_code_to_id[self._src_lang]
__UpperCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
__UpperCAmelCase : int = self.__dict__.copy()
__UpperCAmelCase : str = None
__UpperCAmelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Dict = {}
__UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _snake_case ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _snake_case ( self ):
return self._src_lang
@src_lang.setter
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Optional[Any] = [self.sep_token_id]
__UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
__UpperCAmelCase : Union[str, Any] = src_lang
__UpperCAmelCase : str = self(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = self.convert_tokens_to_ids(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = tgt_lang_id
return inputs
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , UpperCamelCase_ ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : Union[str, Any] = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , UpperCamelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : str = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip()
return out_string
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : List[str] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
__UpperCAmelCase : Dict = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = "eng_Latn" , UpperCamelCase_ = None , UpperCamelCase_ = "fra_Latn" , **UpperCamelCase_ , ):
__UpperCAmelCase : Tuple = src_lang
__UpperCAmelCase : Dict = tgt_lang
        return super().prepare_seq2seq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__UpperCAmelCase : Dict = [self.cur_lang_code]
__UpperCAmelCase : Any = [self.eos_token_id]
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__UpperCAmelCase : Tuple = [self.cur_lang_code]
__UpperCAmelCase : Any = [self.eos_token_id]
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number | (1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number ^ (1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure ):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor() -> None:
    """simple docstring"""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("\033[?25l" )
        sys.stdout.flush()
def show_cursor() -> None:
    """simple docstring"""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write("\033[?25h" )
        sys.stdout.flush()
@contextmanager
def hidden_cursor():
    """simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
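# Usage sketch (illustrative): wrap long-running console output so the
# terminal cursor is hidden while it renders and restored afterwards, even on
# exceptions (the try/finally above guarantees show_cursor() runs).
#
# >>> with hidden_cursor():
# ...     print("rendering a progress bar...")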
| 10 | '''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance( a , b ) -> float:
    """simple docstring"""
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier( train_data , train_target , classes , point , k=5 ) -> str:
    """simple docstring"""
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
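# Note: sorted(distances) orders the (distance, label) tuples by distance
# first, so votes collects the labels of the k nearest neighbours and the
# Counter picks the majority class. Euclidean distance makes this sensitive
# to feature scale; the four iris features share comparable ranges, so no
# normalization is applied here.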
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 10 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a : Any = logging.get_logger(__name__)
_a : str = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class __A (__magic_name__ ):
snake_case :Any = "instructblip_vision_model"
def __init__( self , UpperCamelCase_=14_08 , UpperCamelCase_=61_44 , UpperCamelCase_=39 , UpperCamelCase_=16 , UpperCamelCase_=2_24 , UpperCamelCase_=14 , UpperCamelCase_="gelu" , UpperCamelCase_=1E-6 , UpperCamelCase_=0.0 , UpperCamelCase_=1E-10 , UpperCamelCase_=True , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : List[str] = num_attention_heads
__UpperCAmelCase : Union[str, Any] = patch_size
__UpperCAmelCase : Dict = image_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Tuple = attention_dropout
__UpperCAmelCase : Tuple = layer_norm_eps
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Tuple = qkv_bias
@classmethod
def _snake_case ( cls , UpperCamelCase_ , **UpperCamelCase_ ):
cls._set_token_in_kwargs(UpperCamelCase_ )
__UpperCAmelCase , __UpperCAmelCase : Any = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
__UpperCAmelCase : Any = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class __A (__magic_name__ ):
snake_case :Any = "instructblip_qformer"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=0 , UpperCamelCase_="absolute" , UpperCamelCase_=2 , UpperCamelCase_=14_08 , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : Any = intermediate_size
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
__UpperCAmelCase : str = position_embedding_type
__UpperCAmelCase : Optional[Any] = cross_attention_frequency
__UpperCAmelCase : int = encoder_hidden_size
@classmethod
def _snake_case ( cls , UpperCamelCase_ , **UpperCamelCase_ ):
cls._set_token_in_kwargs(UpperCamelCase_ )
__UpperCAmelCase , __UpperCAmelCase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
__UpperCAmelCase : int = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )
class __A (__magic_name__ ):
snake_case :Optional[int] = "instructblip"
snake_case :Union[str, Any] = True
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=32 , **UpperCamelCase_ ):
super().__init__(**UpperCamelCase_ )
if vision_config is None:
__UpperCAmelCase : Optional[Any] = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
__UpperCAmelCase : Dict = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
__UpperCAmelCase : Dict = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
__UpperCAmelCase : Any = InstructBlipVisionConfig(**UpperCamelCase_ )
__UpperCAmelCase : List[str] = InstructBlipQFormerConfig(**UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = text_config["model_type"] if "model_type" in text_config else "opt"
__UpperCAmelCase : Union[str, Any] = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
__UpperCAmelCase : Tuple = self.text_config.tie_word_embeddings
__UpperCAmelCase : str = self.text_config.is_encoder_decoder
__UpperCAmelCase : List[Any] = num_query_tokens
__UpperCAmelCase : str = self.vision_config.hidden_size
__UpperCAmelCase : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__UpperCAmelCase : Optional[int] = 1.0
__UpperCAmelCase : List[str] = 0.0_2
@classmethod
def _snake_case ( cls , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )
def _snake_case ( self ):
__UpperCAmelCase : int = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : int = self.vision_config.to_dict()
__UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
__UpperCAmelCase : List[Any] = self.text_config.to_dict()
__UpperCAmelCase : List[Any] = self.__class__.model_type
return output
| 10 | '''simple docstring'''
class __A :
    def __init__( self , set_counts ):
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self , src , dst ):
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent( self , disj_set ):
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
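# Usage sketch (illustrative): set_counts holds the initial size of each
# singleton set; merge() unions by rank, get_parent() applies path
# compression, and max_set tracks the largest set built so far.
#
# >>> ds = __A([1, 1, 1])
# >>> ds.merge(0, 1)
# True
# >>> ds.merge(1, 2)
# True
# >>> ds.max_set
# 3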
| 10 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 10 | '''simple docstring'''
def method_2( boundary , steps ) -> float:
    """simple docstring"""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points( a , b , h ):
    """simple docstring"""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f( x ):  # enter your function here
    """simple docstring"""
    y = (x - 0) * (x - 0)
    return y
def main():
    """simple docstring"""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary , steps )
    print(f"""y = {y}""" )
if __name__ == "__main__":
main()
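# Quick accuracy check (a sketch reusing f(x) = x*x from above): the exact
# integral of x^2 over [0, 1] is 1/3. The point generator can skip one sample
# near the right boundary, so a few 1e-3 of slack is allowed here.
assert abs(method_a([0.0, 1.0], 1000.0) - 1.0 / 3.0) < 5e-3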
| 10 | 1 |
'''simple docstring'''
def prefix_function( input_string: str ) -> list:
    """simple docstring"""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix( input_string: str ) -> int:
    """simple docstring"""
    return max(prefix_function(input_string))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
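# Example (a sketch): prefix_function computes, for each position, the length
# of the longest proper prefix that is also a suffix, i.e. the KMP failure table.
assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
assert longest_prefix("aabaaab") == 3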
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class __A (PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self, d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self, token_ids_0, token_ids_1=None, already_has_special_tokens=False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size( self ):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize( self, text ):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id( self, token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self, index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string( self, tokens ):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary( self, save_directory, filename_prefix=None ):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
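# Usage sketch (commented out; downloading the pretrained files needs network
# access, so this is illustrative only):
#   from transformers import XLMRobertaTokenizer
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   tok.tokenize("Hello world")  # -> ['▁Hello', '▁world']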
| 10 | 1 |
'''simple docstring'''
def _print_dist( dist, v ):
    """simple docstring"""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
def floyd_warshall( graph, v ):
    """simple docstring"""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
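# Non-interactive usage sketch (the `_demo` helper name is ours, not part of
# the original script): a 3-vertex graph with edges 0->1 (w=5) and 1->2 (w=2).
def _demo():
    inf = float("inf")
    g = [[0.0, 5.0, inf], [inf, 0.0, 2.0], [inf, inf, 0.0]]
    dist, _ = floyd_warshall(g, 3)
    assert dist[0][2] == 7.0  # shortest path 0 -> 1 -> 2
# _demo()  # uncomment to run without the interactive prompts above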
| 10 | '''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __A (unittest.TestCase ):
    def _get_tensors( self, length ):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria( self ):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ] )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_length_criteria( self ):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_new_tokens_criteria( self ):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)
    def test_max_time_criteria( self ):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))
    def test_validate_stopping_criteria( self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
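# Standalone usage sketch (commented out; shapes are illustrative):
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=8)])
#   input_ids = torch.zeros((1, 8), dtype=torch.long)
#   scores = torch.zeros((1, 8))
#   criteria(input_ids, scores)  # -> True once the sequence reaches max_length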
| 10 | 1 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_a : Dict = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig ):
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder ):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 10 | '''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"Salesforce/codegen-350M-mono": 2048,
}
class __A (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
                f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly.")
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self, *args, **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus( self, *args, **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary( self, save_directory, filename_prefix=None ):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode( self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, truncate_before_pattern=None, **kwargs, ):
        decoded_text = super().decode(
            token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text
    def truncate( self, completion, truncate_before_pattern ):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1
        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __A :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_="None" , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=None , ):
__UpperCAmelCase : int = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : List[str] = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : Optional[Any] = use_input_mask
__UpperCAmelCase : Optional[Any] = use_token_type_ids
__UpperCAmelCase : str = use_labels
__UpperCAmelCase : List[str] = vocab_size
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : List[str] = num_hidden_layers
__UpperCAmelCase : str = num_attention_heads
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : List[Any] = type_sequence_label_size
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : int = num_labels
__UpperCAmelCase : Optional[Any] = num_choices
__UpperCAmelCase : Any = relative_attention
__UpperCAmelCase : str = position_biased_input
__UpperCAmelCase : Any = pos_att_type
__UpperCAmelCase : List[Any] = scope
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : List[Any] = None
if self.use_input_mask:
__UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : List[str] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : str = None
if self.use_labels:
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : List[Any] = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCamelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : List[Any] = TFDebertaVaModel(config=UpperCamelCase_ )
__UpperCAmelCase : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__UpperCAmelCase : int = [input_ids, input_mask]
__UpperCAmelCase : Any = model(UpperCamelCase_ )
__UpperCAmelCase : Any = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = TFDebertaVaForMaskedLM(config=UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCAmelCase : Any = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : List[Any] = self.num_labels
__UpperCAmelCase : Any = TFDebertaVaForSequenceClassification(config=UpperCamelCase_ )
__UpperCAmelCase : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.num_labels
__UpperCAmelCase : str = TFDebertaVaForTokenClassification(config=UpperCamelCase_ )
__UpperCAmelCase : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCAmelCase : Dict = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = TFDebertaVaForQuestionAnswering(config=UpperCamelCase_ )
__UpperCAmelCase : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __A (__magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Any = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case :Tuple = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case :List[Any] = False
snake_case :Optional[int] = False
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = TFDebertaVaModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
@slow
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(UpperCamelCase_ )
@require_tf
class __A (unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def _snake_case ( self ):
pass
@slow
def _snake_case ( self ):
__UpperCAmelCase : Any = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
__UpperCAmelCase : Optional[Any] = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__UpperCAmelCase : Dict = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__UpperCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
__UpperCAmelCase : Any = tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCamelCase_ , atol=1E-4 )
| 10 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
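# Quick sanity check for the two helpers above (a minimal sketch):
if __name__ == "__main__":
    byte_map = bytes_to_unicode()
    assert len(byte_map) == 2**8  # every byte is mapped to a printable character
    print(get_pairs(("h", "e", "l", "l", "o")))
    # -> {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')} (set order may vary)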
class __A (PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    def vocab_size( self ):
        return len(self.encoder)
    def get_vocab( self ):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe( self, token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize( self, text ):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8") )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id( self, token ):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token( self, index ):
        return self.decoder.get(index)
    def convert_tokens_to_string( self, tokens ):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary( self, save_directory, filename_prefix=None ):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self, token_ids_0, token_ids_1=None, already_has_special_tokens=False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization( self, text, is_split_into_words=False, **kwargs ):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 10 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __A (PretrainedConfig ):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
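# Construction sketch (commented out; in transformers this class is exported
# as Speech2Text2Config, with the defaults shown in __init__ above):
#   config = Speech2Text2Config(vocab_size=10000, d_model=256)
#   config.num_attention_heads  # -> 4, resolved through attribute_map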
| 10 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A (TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer( self, **kwargs ):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts( self, tokenizer ):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
def _snake_case ( self ):
pass
| 10 | '''simple docstring'''
def solution( n: int = 100 ) -> int:
    """simple docstring"""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __A :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=30 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=10 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3 , UpperCamelCase_=None , ):
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : str = image_size
__UpperCAmelCase : Union[str, Any] = patch_size
__UpperCAmelCase : Tuple = num_channels
__UpperCAmelCase : int = is_training
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Tuple = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = type_sequence_label_size
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : Dict = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCAmelCase : Tuple = (image_size // patch_size) ** 2
__UpperCAmelCase : Any = num_patches + 1
def _snake_case ( self ):
__UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = TFViTModel(config=UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in config.
__UpperCAmelCase : List[str] = self.image_size // 2
__UpperCAmelCase : Dict = pixel_values[:, :, :image_size, :image_size]
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ )
__UpperCAmelCase : str = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = self.type_sequence_label_size
__UpperCAmelCase : Dict = TFViTForImageClassification(UpperCamelCase_ )
__UpperCAmelCase : List[Any] = model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in config.
__UpperCAmelCase : Optional[Any] = self.image_size // 2
__UpperCAmelCase : Optional[int] = pixel_values[:, :, :image_size, :image_size]
__UpperCAmelCase : Optional[int] = model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCAmelCase : str = 1
__UpperCAmelCase : Tuple = TFViTForImageClassification(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def _snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class __A (__magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Any = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
snake_case :str = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
snake_case :Union[str, Any] = False
snake_case :Tuple = False
snake_case :int = False
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = TFViTModelTester(self )
__UpperCAmelCase : int = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def _snake_case ( self ):
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def _snake_case ( self ):
pass
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[Any] = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__UpperCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(UpperCamelCase_ )
__UpperCAmelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
__UpperCAmelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@slow
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(UpperCamelCase_ )
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __A (unittest.TestCase ):
@cached_property
def _snake_case ( self ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def _snake_case ( self ):
__UpperCAmelCase : Any = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
__UpperCAmelCase : str = self.default_image_processor
__UpperCAmelCase : Optional[int] = prepare_img()
__UpperCAmelCase : Optional[int] = image_processor(images=UpperCamelCase_ , return_tensors="tf" )
# forward pass
__UpperCAmelCase : Optional[Any] = model(**UpperCamelCase_ )
# verify the logits
__UpperCAmelCase : List[Any] = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
__UpperCAmelCase : List[str] = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 )
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
__UpperCAmelCase : Tuple = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
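    # Worked illustration (assumed cash flows): at a 10% discount rate the flows
    # [-100, 50, 60] net to -100 + 50 / 1.1 + 60 / 1.1**2, which rounds to -4.96.
    assert round(sum(cf / 1.1**i for i, cf in enumerate([-100, 50, 60])), 2) == -4.96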
| 10 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
with open(lowerCamelCase__ ) as metadata_file:
__UpperCAmelCase : List[Any] = json.load(lowerCamelCase__ )
__UpperCAmelCase : Dict = LukeConfig(use_entity_aware_attention=lowerCamelCase__ , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
__UpperCAmelCase : Optional[Any] = torch.load(lowerCamelCase__ , map_location="cpu" )["module"]
# Load the entity vocab file
__UpperCAmelCase : Optional[int] = load_original_entity_vocab(lowerCamelCase__ )
# add an entry for [MASK2]
__UpperCAmelCase : Dict = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__UpperCAmelCase : List[str] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
__UpperCAmelCase : List[str] = AddedToken("<ent>" , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ )
__UpperCAmelCase : Union[str, Any] = AddedToken("<ent2>" , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) , "r" ) as f:
__UpperCAmelCase : Dict = json.load(lowerCamelCase__ )
__UpperCAmelCase : Optional[int] = "MLukeTokenizer"
with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) , "w" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
with open(os.path.join(lowerCamelCase__ , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase : Tuple = MLukeTokenizer.from_pretrained(lowerCamelCase__ )
# Initialize the embeddings of the special tokens
__UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(["@"] )[0]
__UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(["#"] )[0]
__UpperCAmelCase : Tuple = state_dict["embeddings.word_embeddings.weight"]
__UpperCAmelCase : List[str] = word_emb[ent_init_index].unsqueeze(0 )
__UpperCAmelCase : int = word_emb[enta_init_index].unsqueeze(0 )
__UpperCAmelCase : Any = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__UpperCAmelCase : Union[str, Any] = state_dict[bias_name]
__UpperCAmelCase : Tuple = decoder_bias[ent_init_index].unsqueeze(0 )
__UpperCAmelCase : str = decoder_bias[enta_init_index].unsqueeze(0 )
__UpperCAmelCase : Optional[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__UpperCAmelCase : int = f"""encoder.layer.{layer_index}.attention.self."""
__UpperCAmelCase : Tuple = state_dict[prefix + matrix_name]
__UpperCAmelCase : int = state_dict[prefix + matrix_name]
__UpperCAmelCase : Union[str, Any] = state_dict[prefix + matrix_name]
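            # i.e. the entity-aware query projections (w2e/e2w/e2e in the original
            # LUKE naming, assumed here) all start as copies of the word-to-word query.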
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__UpperCAmelCase : Tuple = state_dict["entity_embeddings.entity_embeddings.weight"]
__UpperCAmelCase : Any = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__UpperCAmelCase : List[str] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__UpperCAmelCase : Union[str, Any] = state_dict["entity_predictions.bias"]
__UpperCAmelCase : Union[str, Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__UpperCAmelCase : str = torch.cat([entity_prediction_bias, entity_mask_bias] )
__UpperCAmelCase : Optional[Any] = LukeForMaskedLM(config=lowerCamelCase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
__UpperCAmelCase : Any = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
__UpperCAmelCase : Dict = state_dict[key]
else:
__UpperCAmelCase : Dict = state_dict[key]
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
if set(lowerCamelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowerCamelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__UpperCAmelCase : List[Any] = MLukeTokenizer.from_pretrained(lowerCamelCase__ , task="entity_classification" )
__UpperCAmelCase : Optional[int] = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__UpperCAmelCase : Dict = (0, 9)
__UpperCAmelCase : List[str] = tokenizer(lowerCamelCase__ , entity_spans=[span] , return_tensors="pt" )
__UpperCAmelCase : int = model(**lowerCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__UpperCAmelCase : int = torch.Size((1, 33, 768) )
__UpperCAmelCase : str = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__UpperCAmelCase : Tuple = torch.Size((1, 1, 768) )
__UpperCAmelCase : Tuple = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
__UpperCAmelCase : Optional[int] = MLukeTokenizer.from_pretrained(lowerCamelCase__ )
__UpperCAmelCase : int = "Tokyo is the capital of <mask>."
__UpperCAmelCase : List[str] = (24, 30)
__UpperCAmelCase : Any = tokenizer(lowerCamelCase__ , entity_spans=[span] , return_tensors="pt" )
__UpperCAmelCase : Tuple = model(**lowerCamelCase__ )
__UpperCAmelCase : List[Any] = encoding["input_ids"][0].tolist()
__UpperCAmelCase : Union[str, Any] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
__UpperCAmelCase : Optional[Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCamelCase__ )
__UpperCAmelCase : Dict = outputs.entity_logits[0][0].argmax().item()
__UpperCAmelCase : Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowerCamelCase__ ) )
model.save_pretrained(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : int = ["[MASK]", "[PAD]", "[UNK]"]
    __UpperCAmelCase : Dict = [json.loads(line ) for line in open(lowerCamelCase__ )]
__UpperCAmelCase : str = {}
for entry in data:
__UpperCAmelCase : Any = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__UpperCAmelCase : int = entity_id
break
__UpperCAmelCase : Union[str, Any] = f"""{language}:{entity_name}"""
__UpperCAmelCase : List[str] = entity_id
return new_mapping
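# Worked sketch of the remapping above (hypothetical JSONL rows): an entry
# {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]} yields
# {"en:Japan": 3, "ja:日本": 3}, while special tokens keep their bare names.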
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
_a : Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 10 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_a : Union[str, Any] = HfApi()
_a : int = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
_a : Optional[Any] = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_a : List[str] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
_a : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_a : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_a : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_a : str = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> str:
"""simple docstring"""
return "\n".join(
f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
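    # Equivalent one-liner sketch of the table construction used above:
    print("\n".join(f"""5 * {i} = {5 * i}""" for i in range(1, 11)))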
| 10 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (__magic_name__ ):
snake_case :Any = "cvt"
def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_sizes
__UpperCAmelCase : List[str] = patch_stride
__UpperCAmelCase : Tuple = patch_padding
__UpperCAmelCase : int = embed_dim
__UpperCAmelCase : str = num_heads
__UpperCAmelCase : Any = depth
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : List[str] = attention_drop_rate
__UpperCAmelCase : Dict = drop_rate
__UpperCAmelCase : Dict = drop_path_rate
__UpperCAmelCase : str = qkv_bias
__UpperCAmelCase : Optional[int] = cls_token
__UpperCAmelCase : Optional[Any] = qkv_projection_method
__UpperCAmelCase : Tuple = kernel_qkv
__UpperCAmelCase : Optional[Any] = padding_kv
__UpperCAmelCase : Optional[int] = stride_kv
__UpperCAmelCase : Any = padding_q
__UpperCAmelCase : List[Any] = stride_q
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
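# Usage sketch (based on the defaults above): instantiating this config with no
# arguments describes the three-stage CvT-13 layout -- depths [1, 2, 10], embedding
# dims [64, 192, 384], heads [1, 3, 6] -- and any keyword argument overrides one field.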
| 10 | 1 |
'''simple docstring'''
class __A :
def __init__( self , UpperCamelCase_ ):
__UpperCAmelCase : Any = set_counts
__UpperCAmelCase : int = max(UpperCamelCase_ )
__UpperCAmelCase : List[str] = len(UpperCamelCase_ )
__UpperCAmelCase : Any = [1] * num_sets
__UpperCAmelCase : Any = list(range(UpperCamelCase_ ) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Optional[int] = self.get_parent(UpperCamelCase_ )
__UpperCAmelCase : List[Any] = self.get_parent(UpperCamelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : List[Any] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__UpperCAmelCase : Union[str, Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : Dict = src_parent
__UpperCAmelCase : Dict = self.set_counts[src_parent]
__UpperCAmelCase : Dict = max(self.max_set , UpperCamelCase_ )
return True
def _snake_case ( self , UpperCamelCase_ ):
if self.parents[disj_set] == disj_set:
return disj_set
__UpperCAmelCase : str = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
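if __name__ == "__main__":
    # Self-contained sketch of the same union-find idea (simplified, hypothetical),
    # independent of the class above: three singleton sets, then one union.
    parent = list(range(3))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path compression
            x = parent[x]
        return x

    parent[find(0)] = find(1)  # union the sets containing 0 and 1
    assert find(0) == find(1)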
| 10 | '''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> list[float]:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = coefficient_matrix.shape
__UpperCAmelCase , __UpperCAmelCase : Any = constant_matrix.shape
if rowsa != colsa:
__UpperCAmelCase : str = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(lowerCamelCase__ )
if colsa != 1:
__UpperCAmelCase : Optional[Any] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(lowerCamelCase__ )
if rowsa != rowsa:
__UpperCAmelCase : Optional[int] = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(lowerCamelCase__ )
if len(lowerCamelCase__ ) != rowsa:
__UpperCAmelCase : List[str] = (
"Number of initial values must be equal to number of rows in coefficient "
f"""matrix but received {len(lowerCamelCase__ )} and {rowsa}"""
)
raise ValueError(lowerCamelCase__ )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
__UpperCAmelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = table.shape
strictly_diagonally_dominant(lowerCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase__ ):
__UpperCAmelCase : int = []
for row in range(lowerCamelCase__ ):
__UpperCAmelCase : List[str] = 0
for col in range(lowerCamelCase__ ):
if col == row:
__UpperCAmelCase : int = table[row][col]
elif col == cols - 1:
__UpperCAmelCase : Any = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
__UpperCAmelCase : List[Any] = (temp + val) / denom
new_val.append(lowerCamelCase__ )
__UpperCAmelCase : str = new_val
return [float(lowerCamelCase__ ) for i in new_val]
def _lowercase ( lowerCamelCase__ ) -> bool:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = table.shape
__UpperCAmelCase : str = True
for i in range(0 , lowerCamelCase__ ):
__UpperCAmelCase : Union[str, Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
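    # Standalone sketch (assumed example system): 2x + y = 5, x + 3y = 10 -> (1, 3).
    # Each Jacobi sweep computes x_i <- (b_i - sum_{j != i} A_ij * x_j) / A_ii.
    table_a = np.array([[2.0, 1.0], [1.0, 3.0]])
    table_b = np.array([5.0, 10.0])
    approx = np.zeros(2)
    for _ in range(50):
        approx = (table_b - (table_a - np.diag(np.diag(table_a))) @ approx) / np.diag(table_a)
    assert np.allclose(approx, [1.0, 3.0], atol=1e-6)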
| 10 | 1 |
'''simple docstring'''
import os
def _lowercase ( lowerCamelCase__ = "input.txt" ) -> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) as input_file:
__UpperCAmelCase : str = [
[int(lowerCamelCase__ ) for element in line.split("," )]
for line in input_file.readlines()
]
__UpperCAmelCase : Dict = len(lowerCamelCase__ )
__UpperCAmelCase : Optional[int] = len(matrix[0] )
__UpperCAmelCase : int = [[-1 for _ in range(lowerCamelCase__ )] for _ in range(lowerCamelCase__ )]
for i in range(lowerCamelCase__ ):
__UpperCAmelCase : List[str] = matrix[i][0]
for j in range(1 , lowerCamelCase__ ):
for i in range(lowerCamelCase__ ):
__UpperCAmelCase : Optional[Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCamelCase__ ):
__UpperCAmelCase : str = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__UpperCAmelCase : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | '''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
__UpperCAmelCase : Any = prime_factors(lowerCamelCase__ )
if is_square_free(lowerCamelCase__ ):
return -1 if len(lowerCamelCase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
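    # Worked values of the Möbius function implemented above: mu(1) = 1 (no prime
    # factors), mu(6) = 1 (2 * 3, an even count of distinct primes), mu(4) = 0
    # (not square-free), mu(30) = -1 (2 * 3 * 5, an odd count).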
| 10 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class __A (__magic_name__ ):
def __init__( self ):
        # run a quick self-test that the subclass implements the constraint interface correctly
self.test()
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : int = False
while not completed:
if counter == 1:
self.reset()
__UpperCAmelCase : Any = self.advance()
if not self.does_advance(UpperCamelCase_ ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = self.update(UpperCamelCase_ )
counter += 1
if counter > 1_00_00:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def _snake_case ( self ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def _snake_case ( self , UpperCamelCase_ ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def _snake_case ( self , UpperCamelCase_ ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def _snake_case ( self ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def _snake_case ( self ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def _snake_case ( self , UpperCamelCase_=False ):
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class __A (__magic_name__ ):
def __init__( self , UpperCamelCase_ ):
super(UpperCamelCase_ , self ).__init__()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0:
raise ValueError(f"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
__UpperCAmelCase : Tuple = token_ids
__UpperCAmelCase : Any = len(self.token_ids )
__UpperCAmelCase : Union[str, Any] = -1 # the index of the currently fulfilled step
__UpperCAmelCase : int = False
def _snake_case ( self ):
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def _snake_case ( self , UpperCamelCase_ ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def _snake_case ( self , UpperCamelCase_ ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f"""`token_id` has to be an `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" )
__UpperCAmelCase : str = False
__UpperCAmelCase : str = False
__UpperCAmelCase : int = False
if self.does_advance(UpperCamelCase_ ):
self.fulfilled_idx += 1
__UpperCAmelCase : Optional[Any] = True
if self.fulfilled_idx == (self.seqlen - 1):
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Optional[int] = completed
else:
# failed to make progress.
__UpperCAmelCase : Optional[int] = True
self.reset()
return stepped, completed, reset
def _snake_case ( self ):
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Optional[Any] = 0
def _snake_case ( self ):
return self.seqlen - (self.fulfilled_idx + 1)
def _snake_case ( self , UpperCamelCase_=False ):
__UpperCAmelCase : Dict = PhrasalConstraint(self.token_ids )
if stateful:
__UpperCAmelCase : List[Any] = self.seqlen
__UpperCAmelCase : Dict = self.fulfilled_idx
__UpperCAmelCase : List[Any] = self.completed
return new_constraint
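# Behaviour sketch (hypothetical ids): for a phrasal constraint over [7, 8],
# update(7) yields (stepped=True, completed=False, reset=False); a following
# update(8) completes the phrase, while any non-matching id resets progress.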
class __A :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=True ):
        __UpperCAmelCase : Union[str, Any] = max([len(one ) for one in nested_token_ids] )
__UpperCAmelCase : List[str] = {}
for token_ids in nested_token_ids:
__UpperCAmelCase : Dict = root
for tidx, token_id in enumerate(UpperCamelCase_ ):
if token_id not in level:
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Tuple = level[token_id]
if no_subsets and self.has_subsets(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
f""" {nested_token_ids}.""" )
__UpperCAmelCase : Union[str, Any] = root
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Tuple = self.trie
for current_token in current_seq:
__UpperCAmelCase : Dict = start[current_token]
__UpperCAmelCase : str = list(start.keys() )
return next_tokens
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Any = self.next_tokens(UpperCamelCase_ )
return len(UpperCamelCase_ ) == 0
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = list(root.values() )
if len(UpperCamelCase_ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCamelCase_ ) for nn in next_nodes] )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Optional[int] = self.count_leaves(UpperCamelCase_ )
return len(UpperCamelCase_ ) != leaf_count
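# Worked trie sketch (hypothetical ids): nested_token_ids = [[1, 2], [1, 3]] builds
# {1: {2: {}, 3: {}}}; next_tokens([1]) -> [2, 3] and reached_leaf([1, 2]) -> True.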
class __A (__magic_name__ ):
def __init__( self , UpperCamelCase_ ):
super(UpperCamelCase_ , self ).__init__()
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or len(UpperCamelCase_ ) == 0:
raise ValueError(f"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(UpperCamelCase_ , UpperCamelCase_ ) for token_ids in nested_token_ids ):
raise ValueError(f"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
__UpperCAmelCase : Any = DisjunctiveTrie(UpperCamelCase_ )
__UpperCAmelCase : str = nested_token_ids
__UpperCAmelCase : int = self.trie.max_height
__UpperCAmelCase : Dict = []
__UpperCAmelCase : List[Any] = False
def _snake_case ( self ):
__UpperCAmelCase : List[str] = self.trie.next_tokens(self.current_seq )
if len(UpperCamelCase_ ) == 0:
return None
else:
return token_list
def _snake_case ( self , UpperCamelCase_ ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" )
__UpperCAmelCase : List[Any] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def _snake_case ( self , UpperCamelCase_ ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCamelCase_ )}""" )
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : List[Any] = False
if self.does_advance(UpperCamelCase_ ):
self.current_seq.append(UpperCamelCase_ )
__UpperCAmelCase : int = True
else:
__UpperCAmelCase : Union[str, Any] = True
self.reset()
__UpperCAmelCase : Union[str, Any] = self.trie.reached_leaf(self.current_seq )
__UpperCAmelCase : Any = completed
return stepped, completed, reset
def _snake_case ( self ):
__UpperCAmelCase : int = False
__UpperCAmelCase : List[Any] = []
def _snake_case ( self ):
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def _snake_case ( self , UpperCamelCase_=False ):
__UpperCAmelCase : Dict = DisjunctiveConstraint(self.token_ids )
if stateful:
__UpperCAmelCase : Union[str, Any] = self.seqlen
__UpperCAmelCase : List[Any] = self.current_seq
__UpperCAmelCase : List[Any] = self.completed
return new_constraint
class __A :
def __init__( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = constraints
# max # of steps required to fulfill a given constraint
__UpperCAmelCase : Optional[int] = max([c.seqlen for c in constraints] )
__UpperCAmelCase : List[str] = len(UpperCamelCase_ )
__UpperCAmelCase : Dict = False
self.init_state()
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : int = None
__UpperCAmelCase : List[str] = [constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.constraints]
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def _snake_case ( self ):
__UpperCAmelCase : str = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__UpperCAmelCase : Union[str, Any] = constraint.advance()
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.append(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.extend(UpperCamelCase_ )
else:
__UpperCAmelCase : Dict = self.inprogress_constraint.advance()
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.append(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
token_list.extend(UpperCamelCase_ )
if len(UpperCamelCase_ ) == 0:
return None
else:
return token_list
def _snake_case ( self , UpperCamelCase_ ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.add(UpperCamelCase_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def _snake_case ( self , UpperCamelCase_ ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(f"""`token_id` should be an `int`, but is `{token_id}`.""" )
__UpperCAmelCase , __UpperCAmelCase : int = False, False
if self.completed:
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Any = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.inprogress_constraint.update(UpperCamelCase_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCamelCase_ ) )
__UpperCAmelCase : Tuple = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__UpperCAmelCase : List[Any] = None
if len(self.pending_constraints ) == 0:
# we're done!
__UpperCAmelCase : List[Any] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCamelCase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = pending_constraint.update(UpperCamelCase_ )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(UpperCamelCase_ )
__UpperCAmelCase : int = None
if not complete and stepped:
__UpperCAmelCase : Any = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__UpperCAmelCase : Dict = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__UpperCAmelCase : str = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def _snake_case ( self , UpperCamelCase_=True ):
        __UpperCAmelCase : List[Any] = ConstraintListState(self.constraints )  # we never actually mutate the
        # self.constraints objects throughout this process, so they are still in their initialization state.
if stateful:
__UpperCAmelCase : List[Any] = [
constraint.copy(stateful=UpperCamelCase_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__UpperCAmelCase : List[str] = self.inprogress_constraint.copy(stateful=UpperCamelCase_ )
__UpperCAmelCase : Dict = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
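# Usage sketch: with the lazy module above, `from transformers import ReformerConfig`
# resolves immediately, while torch-backed symbols such as ReformerModel are only
# imported (and torch only loaded) on first attribute access.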
| 10 | 1 |
'''simple docstring'''
def _lowercase ( ) -> int:
"""simple docstring"""
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(lowerCamelCase__ , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Any = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "ibert"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : str = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : Any = position_embedding_type
__UpperCAmelCase : Tuple = quant_mode
__UpperCAmelCase : Union[str, Any] = force_dequant
class __A (__magic_name__ ):
@property
def _snake_case ( self ):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
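# Sketch of the ONNX dynamic-axes mapping produced above (illustrative): for most
# tasks it is {"input_ids": {0: "batch", 1: "sequence"}, "attention_mask": {0: "batch",
# 1: "sequence"}}; for multiple-choice, axis 1 is named "choice" and sequence moves to axis 2.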
| 10 | 1 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A (__magic_name__ , unittest.TestCase ):
snake_case :Optional[int] = CpmAntTokenizer
snake_case :List[str] = False
def _snake_case ( self ):
super().setUp()
__UpperCAmelCase : Any = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
__UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
def _snake_case ( self ):
__UpperCAmelCase : str = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
__UpperCAmelCase : str = "今天天气真好!"
__UpperCAmelCase : str = ["今天", "天气", "真", "好", "!"]
__UpperCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : List[str] = "今天天气真好!"
__UpperCAmelCase : Any = [tokenizer.bos_token] + tokens
__UpperCAmelCase : Any = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
__UpperCAmelCase : int = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 10 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : str = HfArgumentParser(lowerCamelCase__ )
__UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()[0]
__UpperCAmelCase : Any = TensorFlowBenchmark(args=lowerCamelCase__ )
try:
__UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__UpperCAmelCase : str = "Arg --no_{0} is no longer used, please use --no-{0} instead."
__UpperCAmelCase : Tuple = " ".join(str(lowerCamelCase__ ).split(" " )[:-1] )
__UpperCAmelCase : Any = ""
__UpperCAmelCase : List[Any] = eval(str(lowerCamelCase__ ).split(" " )[-1] )
__UpperCAmelCase : Optional[int] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
__UpperCAmelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(lowerCamelCase__ )
raise ValueError(lowerCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
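    # Example of the deprecated-flag handling above (illustrative, assumed flag name):
    # a run invoked with `--no_inference` is rejected with the hint `--no-inference`.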
| 10 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_a : str = (720, 1280) # Height, Width
_a : Any = (0.4, 0.6) # if height or width is lower than this scale, drop it.
_a : List[str] = 1 / 100
_a : Any = ""
_a : Union[str, Any] = ""
_a : Optional[Any] = ""
_a : Tuple = 250
def _lowercase ( ) -> None:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : int = get_dataset(lowerCamelCase__ , lowerCamelCase__ )
for index in range(lowerCamelCase__ ):
__UpperCAmelCase : List[Any] = random.sample(range(len(lowerCamelCase__ ) ) , 4 )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = update_image_and_anno(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , filter_scale=lowerCamelCase__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase : str = random_chars(32 )
__UpperCAmelCase : Any = path.split(os.sep )[-1].rsplit("." , 1 )[0]
__UpperCAmelCase : Any = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , lowerCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
__UpperCAmelCase : Union[str, Any] = []
for anno in new_annos:
__UpperCAmelCase : int = anno[3] - anno[1]
__UpperCAmelCase : List[Any] = anno[4] - anno[2]
__UpperCAmelCase : Any = anno[1] + width / 2
__UpperCAmelCase : Optional[int] = anno[2] + height / 2
__UpperCAmelCase : List[Any] = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(lowerCamelCase__ )
with open(f"""{file_root}.txt""" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> tuple[list, list]:
"""simple docstring"""
__UpperCAmelCase : Dict = []
__UpperCAmelCase : List[Any] = []
for label_file in glob.glob(os.path.join(lowerCamelCase__ , "*.txt" ) ):
__UpperCAmelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(lowerCamelCase__ ) as in_file:
__UpperCAmelCase : int = in_file.readlines()
__UpperCAmelCase : List[Any] = os.path.join(lowerCamelCase__ , f"""{label_name}.jpg""" )
__UpperCAmelCase : Optional[Any] = []
for obj_list in obj_lists:
__UpperCAmelCase : Union[str, Any] = obj_list.rstrip("\n" ).split(" " )
__UpperCAmelCase : str = float(obj[1] ) - float(obj[3] ) / 2
__UpperCAmelCase : Optional[Any] = float(obj[2] ) - float(obj[4] ) / 2
__UpperCAmelCase : Optional[Any] = float(obj[1] ) + float(obj[3] ) / 2
__UpperCAmelCase : str = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(lowerCamelCase__ )
labels.append(lowerCamelCase__ )
return img_paths, labels
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__UpperCAmelCase : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCAmelCase : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__UpperCAmelCase : Tuple = int(scale_x * output_size[1] )
__UpperCAmelCase : List[Any] = int(scale_y * output_size[0] )
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : Any = []
for i, index in enumerate(lowerCamelCase__ ):
__UpperCAmelCase : str = all_img_list[index]
path_list.append(lowerCamelCase__ )
__UpperCAmelCase : Tuple = all_annos[index]
__UpperCAmelCase : List[str] = cva.imread(lowerCamelCase__ )
if i == 0: # top-left
__UpperCAmelCase : Any = cva.resize(lowerCamelCase__ , (divid_point_x, divid_point_y) )
__UpperCAmelCase : Tuple = img
for bbox in img_annos:
__UpperCAmelCase : List[Any] = bbox[1] * scale_x
__UpperCAmelCase : List[Any] = bbox[2] * scale_y
__UpperCAmelCase : Optional[Any] = bbox[3] * scale_x
__UpperCAmelCase : int = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__UpperCAmelCase : Any = cva.resize(lowerCamelCase__ , (output_size[1] - divid_point_x, divid_point_y) )
__UpperCAmelCase : Union[str, Any] = img
for bbox in img_annos:
__UpperCAmelCase : Any = scale_x + bbox[1] * (1 - scale_x)
__UpperCAmelCase : int = bbox[2] * scale_y
__UpperCAmelCase : str = scale_x + bbox[3] * (1 - scale_x)
__UpperCAmelCase : str = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__UpperCAmelCase : Optional[int] = cva.resize(lowerCamelCase__ , (divid_point_x, output_size[0] - divid_point_y) )
__UpperCAmelCase : Tuple = img
for bbox in img_annos:
__UpperCAmelCase : List[Any] = bbox[1] * scale_x
__UpperCAmelCase : int = scale_y + bbox[2] * (1 - scale_y)
__UpperCAmelCase : Any = bbox[3] * scale_x
__UpperCAmelCase : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__UpperCAmelCase : List[str] = cva.resize(
lowerCamelCase__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__UpperCAmelCase : Optional[int] = img
for bbox in img_annos:
__UpperCAmelCase : Any = scale_x + bbox[1] * (1 - scale_x)
__UpperCAmelCase : Dict = scale_y + bbox[2] * (1 - scale_y)
__UpperCAmelCase : Any = scale_x + bbox[3] * (1 - scale_x)
__UpperCAmelCase : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
__UpperCAmelCase : Any = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
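# Worked coordinate sketch for the quadrant maths above (hypothetical numbers):
# with scale_x = scale_y = 0.5, a normalized xmin of 0.2 in a top-left source box
# scales to 0.2 * 0.5 = 0.1, while the same box pasted top-right becomes
# 0.5 + 0.2 * (1 - 0.5) = 0.6 -- each quadrant rescales, and the right/bottom ones add an offset.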
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
__UpperCAmelCase : Dict = ascii_lowercase + digits
return "".join(random.choice(lowerCamelCase__ ) for _ in range(lowerCamelCase__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 10 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline
snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case :Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case :Any = frozenset([] )
snake_case :Optional[int] = True
@property
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[str] = (16, 16)
__UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
__UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" )
__UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , )
__UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ )
__UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith("mps" ):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__UpperCAmelCase : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
        expected_slice = np.array(
            [0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
def _snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _snake_case ( self ):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # these schedulers have no sigma schedule and are not supported by this pipeline
                continue
            scheduler_cls = getattr(diffusers , scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            output = pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class __A (unittest.TestCase ):
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
        generator = torch.manual_seed(33 )
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.float16 )
        pipe.to("cuda" )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.float16 )
        upscaler.to("cuda" )
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt , generator=generator , output_type="latent" ).images
        image = upscaler(
            prompt=prompt , image=low_res_latents , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type="np" , ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _snake_case ( self ):
        generator = torch.manual_seed(33 )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.float16 )
        upscaler.to("cuda" )
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        low_res_img = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
        image = upscaler(
            prompt=prompt , image=low_res_img , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type="np" , ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
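

# Illustrative sketch of the scheduler-swap pattern exercised in the fast tests
# above: a compatible scheduler can be rebuilt from the current scheduler's
# config. DPMSolverMultistepScheduler is one choice the compatibility test does
# not skip; treat the pairing as an assumption, not a guarantee.
def _swap_scheduler_example(pipe ):
    from diffusers import DPMSolverMultistepScheduler

    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
    return pipe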
| 10 | 1 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
api = HfApi()
results = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"""Started running {mod.modelId}!!!""")
        if mod.modelId.startswith("CompVis"):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | '''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    def __init__( self , features=None , **torch_tensor_kwargs ):
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate( self , column ):
        import torch

        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column

    def _tensorize( self , value ):
        import torch

        if isinstance(value , (str, bytes, type(None)) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()

        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )

    def _recursive_tensorize( self , data_struct ):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct , "__array__" ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )

    def recursive_tensorize( self , data_struct ):
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )

    def format_row( self , pa_table ):
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )

    def format_column( self , pa_table ):
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column

    def format_batch( self , pa_table ):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
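

# Minimal sketch of the recursive tensorization implemented above: walk nested
# dicts/lists and turn numpy leaves into torch tensors. The real formatter also
# applies dtype defaults, PIL decoding, and column consolidation.
def _to_torch_sketch(obj ):
    import torch

    if isinstance(obj , Mapping ):
        return {k: _to_torch_sketch(v ) for k, v in obj.items()}
    if isinstance(obj , (list, tuple) ):
        return [_to_torch_sketch(v ) for v in obj]
    if isinstance(obj , np.ndarray ):
        return torch.tensor(obj )
    return obj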
| 10 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 10 | '''simple docstring'''
def valid_coloring( neighbours , colored_vertices , color ) -> bool:
    """Check that no already-colored neighbour of a vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )


def util_color( graph , max_colors , colored_vertices , index ) -> bool:
    """Try to color vertex `index` onwards recursively, backtracking on failure."""
    # Base Case
    if index == len(graph ):
        return True

    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color( graph , max_colors ) -> list[int]:
    """Return a valid vertex coloring using at most `max_colors`, or [] if none exists."""
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
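

# Usage sketch: a 4-cycle is 2-colorable, so a valid assignment comes back for
# max_colors=2, while a single color fails and yields [].
if __name__ == "__main__":
    square = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(square , 2 ))  # [0, 1, 0, 1]
    print(color(square , 1 ))  # []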
| 10 | 1 |
'''simple docstring'''
import os
from math import log10


def solution( data_file = "base_exp.txt" ) -> int:
    """Return the 1-based line number of the pair a,x whose a**x is largest."""
    largest : float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split("," ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
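    # Why logarithms: a**x itself would be astronomically large, but log10 is
    # monotonic, so comparing x * log10(a) preserves the ordering of a**x.
    from math import log10

    assert (2**10 > 3**6) == (10 * log10(2 ) > 6 * log10(3 ))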
| 10 | '''simple docstring'''
def set_bit( number , position ) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit( number , position ) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit( number , position ) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set( number , position ) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit( number , position ) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
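
    # Quick demonstration on 0b1010 (decimal 10):
    print(set_bit(0b1010 , 0 ))     # 11 -> 0b1011
    print(clear_bit(0b1010 , 1 ))   # 8  -> 0b1000
    print(flip_bit(0b1010 , 3 ))    # 2  -> 0b0010
    print(is_bit_set(0b1010 , 1 ))  # True
    print(get_bit(0b1010 , 2 ))     # 0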
| 10 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    """Discretize the given alpha_t_bar function into a schedule of betas."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class __A (__magic_name__ , __magic_name__ ):
snake_case :Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
snake_case :Dict = 2
@register_to_config
def __init__( self , UpperCamelCase_ = 10_00 , UpperCamelCase_ = 0.0_0_0_8_5 , UpperCamelCase_ = 0.0_1_2 , UpperCamelCase_ = "linear" , UpperCamelCase_ = None , UpperCamelCase_ = "epsilon" , UpperCamelCase_ = "linspace" , UpperCamelCase_ = 0 , ):
if trained_betas is not None:
__UpperCAmelCase : List[str] = torch.tensor(UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
__UpperCAmelCase : int = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__UpperCAmelCase : Optional[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__UpperCAmelCase : Tuple = betas_for_alpha_bar(UpperCamelCase_ )
else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
__UpperCAmelCase : Optional[Any] = 1.0 - self.betas
__UpperCAmelCase : Optional[Any] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=None ):
if schedule_timesteps is None:
__UpperCAmelCase : int = self.timesteps
__UpperCAmelCase : str = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__UpperCAmelCase : Union[str, Any] = 1 if len(UpperCamelCase_ ) > 1 else 0
else:
__UpperCAmelCase : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
__UpperCAmelCase : Dict = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _snake_case ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , ):
__UpperCAmelCase : Tuple = self.index_for_timestep(UpperCamelCase_ )
if self.state_in_first_order:
__UpperCAmelCase : Tuple = self.sigmas[step_index]
else:
__UpperCAmelCase : Tuple = self.sigmas_interpol[step_index]
__UpperCAmelCase : Any = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , ):
__UpperCAmelCase : Union[str, Any] = num_inference_steps
__UpperCAmelCase : Any = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__UpperCAmelCase : int = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__UpperCAmelCase : Union[str, Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCAmelCase : Tuple = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__UpperCAmelCase : List[Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__UpperCAmelCase : Union[str, Any] = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
__UpperCAmelCase : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__UpperCAmelCase : str = torch.from_numpy(np.log(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__UpperCAmelCase : List[Any] = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ )
__UpperCAmelCase : Any = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__UpperCAmelCase : int = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ )
# interpolate sigmas
__UpperCAmelCase : Tuple = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__UpperCAmelCase : Tuple = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__UpperCAmelCase : Union[str, Any] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCamelCase_ ).startswith("mps" ):
# mps does not support float64
__UpperCAmelCase : Union[str, Any] = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=torch.floataa )
else:
__UpperCAmelCase : Tuple = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
# interpolate timesteps
__UpperCAmelCase : Tuple = self.sigma_to_t(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=timesteps.dtype )
__UpperCAmelCase : int = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__UpperCAmelCase : List[str] = torch.cat([timesteps[:1], interleaved_timesteps] )
__UpperCAmelCase : str = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__UpperCAmelCase : List[str] = defaultdict(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
# get log sigma
__UpperCAmelCase : Union[str, Any] = sigma.log()
# get distribution
__UpperCAmelCase : Dict = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__UpperCAmelCase : Optional[int] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__UpperCAmelCase : List[str] = low_idx + 1
__UpperCAmelCase : Union[str, Any] = self.log_sigmas[low_idx]
__UpperCAmelCase : List[Any] = self.log_sigmas[high_idx]
# interpolate sigmas
__UpperCAmelCase : Tuple = (low - log_sigma) / (low - high)
__UpperCAmelCase : int = w.clamp(0 , 1 )
# transform interpolation to time range
__UpperCAmelCase : Optional[Any] = (1 - w) * low_idx + w * high_idx
__UpperCAmelCase : Dict = t.view(sigma.shape )
return t
@property
def _snake_case ( self ):
return self.sample is None
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = True , ):
__UpperCAmelCase : Optional[int] = self.index_for_timestep(UpperCamelCase_ )
# advance index counter by 1
__UpperCAmelCase : List[str] = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__UpperCAmelCase : int = self.sigmas[step_index]
__UpperCAmelCase : str = self.sigmas_interpol[step_index + 1]
__UpperCAmelCase : Optional[int] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__UpperCAmelCase : List[str] = self.sigmas[step_index - 1]
__UpperCAmelCase : int = self.sigmas_interpol[step_index]
__UpperCAmelCase : int = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Optional[int] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__UpperCAmelCase : Optional[int] = sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCAmelCase : Union[str, Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__UpperCAmelCase : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol
__UpperCAmelCase : int = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__UpperCAmelCase : Optional[int] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__UpperCAmelCase : List[Any] = sigma_interpol - sigma_hat
# store for 2nd order step
__UpperCAmelCase : int = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__UpperCAmelCase : int = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__UpperCAmelCase : Any = sigma_next - sigma_hat
__UpperCAmelCase : Dict = self.sample
__UpperCAmelCase : str = None
__UpperCAmelCase : List[Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__UpperCAmelCase : Any = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ):
# mps does not support float64
__UpperCAmelCase : int = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__UpperCAmelCase : List[Any] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__UpperCAmelCase : Dict = self.timesteps.to(original_samples.device )
__UpperCAmelCase : Optional[Any] = timesteps.to(original_samples.device )
__UpperCAmelCase : Tuple = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps]
__UpperCAmelCase : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__UpperCAmelCase : Optional[int] = sigma.unsqueeze(-1 )
__UpperCAmelCase : Union[str, Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
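

# Illustrative sketch (not part of this module's API): the standard denoising
# loop a scheduler with this interface plugs into. `model` stands for any
# callable returning a noise prediction; shape and step count are placeholders.
def _example_denoising_loop(model , scheduler , shape , num_inference_steps=25 ):
    scheduler.set_timesteps(num_inference_steps )
    sample = torch.randn(shape ) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample , t )
        noise_pred = model(model_input , t )
        sample = scheduler.step(noise_pred , t , sample ).prev_sample
    return sample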
| 10 | '''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"] )
y = np.array(data["target"] )
classes = data["target_names"]

X_train , X_test , y_train , y_test = train_test_split(X, y)


def euclidean_distance( a , b ):
    """Euclidean distance between two points given as array-likes."""
    return np.linalg.norm(np.array(a ) - np.array(b ) )


def classifier( train_data , train_target , classes , point , k=5 ):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 10 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : Dict = logging.get_logger(__name__)
_a : Tuple = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "codegen"
snake_case :List[str] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , UpperCamelCase_=5_04_00 , UpperCamelCase_=20_48 , UpperCamelCase_=20_48 , UpperCamelCase_=40_96 , UpperCamelCase_=28 , UpperCamelCase_=16 , UpperCamelCase_=64 , UpperCamelCase_=None , UpperCamelCase_="gelu_new" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0_2 , UpperCamelCase_=True , UpperCamelCase_=5_02_56 , UpperCamelCase_=5_02_56 , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : str = n_ctx
__UpperCAmelCase : Optional[int] = n_positions
__UpperCAmelCase : Dict = n_embd
__UpperCAmelCase : Tuple = n_layer
__UpperCAmelCase : Tuple = n_head
__UpperCAmelCase : List[str] = n_inner
__UpperCAmelCase : Union[str, Any] = rotary_dim
__UpperCAmelCase : Union[str, Any] = activation_function
__UpperCAmelCase : List[Any] = resid_pdrop
__UpperCAmelCase : str = embd_pdrop
__UpperCAmelCase : Optional[int] = attn_pdrop
__UpperCAmelCase : Optional[int] = layer_norm_epsilon
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : str = use_cache
__UpperCAmelCase : Dict = bos_token_id
__UpperCAmelCase : Any = eos_token_id
super().__init__(
bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , tie_word_embeddings=UpperCamelCase_ , **UpperCamelCase_ )
class __A (__magic_name__ ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = "default" , UpperCamelCase_ = None , UpperCamelCase_ = False , ):
super().__init__(UpperCamelCase_ , task=UpperCamelCase_ , patching_specs=UpperCamelCase_ , use_past=UpperCamelCase_ )
if not getattr(self._config , "pad_token_id" , UpperCamelCase_ ):
# TODO: how to do that better?
__UpperCAmelCase : Tuple = 0
@property
def _snake_case ( self ):
__UpperCAmelCase : str = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase_ , direction="inputs" )
__UpperCAmelCase : List[str] = {0: "batch", 1: "past_sequence + sequence"}
else:
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _snake_case ( self ):
return self._config.n_layer
@property
def _snake_case ( self ):
return self._config.n_head
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = False , UpperCamelCase_ = None , ):
__UpperCAmelCase : Optional[int] = super(UpperCamelCase_ , self ).generate_dummy_inputs(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
# We need to order the input in the way they appears in the forward()
__UpperCAmelCase : Optional[Any] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__UpperCAmelCase , __UpperCAmelCase : Dict = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__UpperCAmelCase : Any = seqlen + 2
__UpperCAmelCase : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__UpperCAmelCase : List[str] = [
(torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(self.num_layers )
]
__UpperCAmelCase : str = common_inputs["attention_mask"]
if self.use_past:
__UpperCAmelCase : Any = ordered_inputs["attention_mask"].dtype
__UpperCAmelCase : Any = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self ):
return 13
| 10 | '''simple docstring'''
class __A :
    def __init__( self , set_counts ):
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )

    def merge( self , src , dst ):
        """Merge the sets containing src and dst; return False if already merged."""
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True

    def get_parent( self , disj_set ):
        """Find the root of disj_set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
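

# Usage sketch for the weighted disjoint set above (the class name __A is kept
# from the file): merging singleton {0} into {2} grows the largest set to 4.
if __name__ == "__main__":
    ds = __A([1, 2, 3] )
    assert ds.merge(0 , 2 )
    assert ds.max_set == 4
    assert ds.get_parent(0 ) == 2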
| 10 | 1 |
'''simple docstring'''
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode( word ) -> str:
    """Encode `word` (letters and spaces) with the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces" )
    return encoded


def decode( coded ) -> str:
    """Decode a Baconian-cipher string made of 'A', 'B' and spaces."""
    if set(coded ) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces" )
    decoded = ""
    for word in coded.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
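
    # Round-trip sketch: decoding an encoding reproduces the input; the space
    # survives because " " maps to itself in both dictionaries above.
    assert decode(encode("hello world" ) ) == "hello world"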
| 10 | '''simple docstring'''
def method_a( boundary , steps ):
    """Extended trapezoidal rule: approximate the integral of f over `boundary`."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y


def make_points( a , b , h ):
    """Yield the interior sample points a + h, a + 2h, ... while x < b - h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f( x ):  # enter your function here
    """Integrand used for the demo below: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y


def main():
    """Integrate f over [0, 1] with 10 steps and print the estimate."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary , steps )
    print(f"""y = {y}""" )
if __name__ == "__main__":
main()
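
    # Accuracy note: make_points stops at x < b - h, so the sample at x = b - h
    # is skipped and the estimate runs slightly low; with steps=100 the result
    # is ~0.3235 versus the exact 1/3 for f(x) = x**2.
    print(abs(method_a([0.0, 1.0] , 100.0 ) - 1 / 3 ))  # ~0.0098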
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_ad( x , y , z , scale , distance ) -> tuple[float, float]:
    """Project the 3D point (x, y, z) onto 2D with a simple perspective transform."""
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f"""Input values must either be float or int: {list(locals().values() )}"""
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate( x , y , z , axis , angle ) -> tuple[float, float, float]:
    """Rotate (x, y, z) about `axis`; the angle argument is rescaled below."""
    if not isinstance(axis , str ):
        raise TypeError("Axis must be a str" )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            "Input values except axis must either be float or int: "
            f"""{list(input_variables.values() )}"""
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'" )
    return new_x, new_y, new_z
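

# Reference sketch: a textbook z-axis rotation by `theta` in radians. `rotate`
# above applies the same formulas after rescaling its angle argument by
# (angle % 360) / 450 * 180 / pi, so its 90.0 is not a quarter turn.
def rotate_z_radians(x , y , theta ):
    return x * math.cos(theta ) - y * math.sin(theta ), y * math.cos(theta ) + x * math.sin(theta )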
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
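

# Sketch of the lazy-import pattern used above: attribute access triggers the
# submodule import, so heavy backends (torch/tf/flax) load only when touched.
# Simplified stand-in for the real _LazyModule; names are illustrative.
class _LazyModuleSketch:
    def __init__(self , name , import_structure ):
        self._name = name
        self._structure = import_structure  # {submodule: [attr, ...]}

    def __getattr__(self , attr ):
        import importlib

        for submodule, attrs in self._structure.items():
            if attr in attrs:
                module = importlib.import_module(f"""{self._name}.{submodule}""" )
                return getattr(module , attr )
        raise AttributeError(attr )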
| 10 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : int = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __A (__magic_name__ ):
snake_case :Optional[int] = "trocr"
snake_case :Union[str, Any] = ["past_key_values"]
snake_case :int = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self , UpperCamelCase_=5_02_65 , UpperCamelCase_=10_24 , UpperCamelCase_=12 , UpperCamelCase_=16 , UpperCamelCase_=40_96 , UpperCamelCase_="gelu" , UpperCamelCase_=5_12 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , **UpperCamelCase_ , ):
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Optional[Any] = d_model
__UpperCAmelCase : Tuple = decoder_layers
__UpperCAmelCase : Optional[Any] = decoder_attention_heads
__UpperCAmelCase : Dict = decoder_ffn_dim
__UpperCAmelCase : List[Any] = activation_function
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : List[str] = dropout
__UpperCAmelCase : Union[str, Any] = attention_dropout
__UpperCAmelCase : Dict = activation_dropout
__UpperCAmelCase : Union[str, Any] = init_std
__UpperCAmelCase : Optional[int] = decoder_layerdrop
__UpperCAmelCase : Union[str, Any] = use_cache
__UpperCAmelCase : Tuple = scale_embedding
__UpperCAmelCase : str = use_learned_position_embeddings
__UpperCAmelCase : int = layernorm_embedding
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
| 10 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = VOCAB_FILES_NAMES
snake_case :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ):
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCAmelCase : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
__UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
__UpperCAmelCase : List[str] = self.__dict__.copy()
__UpperCAmelCase : str = None
__UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def _snake_case ( self ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , UpperCamelCase_ ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , UpperCamelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : List[str] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
__UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
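

# Sketch of the fairseq/spm id alignment handled above: fairseq reserves ids
# 0-3 for its specials, so every SentencePiece id is shifted by an offset of 1,
# and spm's own unk (piece id 0) maps back to the fairseq <unk> id 3.
def _spm_id_to_fairseq_id(spm_id , fairseq_offset=1 , unk_token_id=3 ):
    return spm_id + fairseq_offset if spm_id else unk_token_id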
| 10 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
_a : List[str] = logging.get_logger(__name__)
_a : Any = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class __A (__magic_name__ ):
snake_case :List[str] = "imagegpt"
snake_case :str = ["past_key_values"]
snake_case :Tuple = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , UpperCamelCase_=5_12 + 1 , UpperCamelCase_=32 * 32 , UpperCamelCase_=5_12 , UpperCamelCase_=24 , UpperCamelCase_=8 , UpperCamelCase_=None , UpperCamelCase_="quick_gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0_2 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Dict = n_positions
__UpperCAmelCase : Tuple = n_embd
__UpperCAmelCase : List[str] = n_layer
__UpperCAmelCase : Any = n_head
__UpperCAmelCase : Optional[Any] = n_inner
__UpperCAmelCase : Dict = activation_function
__UpperCAmelCase : Optional[Any] = resid_pdrop
__UpperCAmelCase : List[str] = embd_pdrop
__UpperCAmelCase : str = attn_pdrop
__UpperCAmelCase : int = layer_norm_epsilon
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : List[str] = scale_attn_weights
__UpperCAmelCase : int = use_cache
__UpperCAmelCase : Tuple = scale_attn_by_inverse_layer_idx
__UpperCAmelCase : Optional[int] = reorder_and_upcast_attn
__UpperCAmelCase : List[str] = tie_word_embeddings
super().__init__(tie_word_embeddings=UpperCamelCase_ , **UpperCamelCase_ )
class __A (__magic_name__ ):
@property
def _snake_case ( self ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = 1 , UpperCamelCase_ = -1 , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = 3 , UpperCamelCase_ = 32 , UpperCamelCase_ = 32 , ):
__UpperCAmelCase : Tuple = self._generate_dummy_images(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = dict(preprocessor(images=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) )
return inputs
| 10 | '''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __A (unittest.TestCase ):
    def _get_tensors( self , length ):
        batch_size = 3
        vocab_size = 2_50
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 )
__UpperCAmelCase : Tuple = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : int = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : int = MaxLengthCriteria(max_length=10 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__UpperCAmelCase , __UpperCAmelCase : List[str] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(5 )
__UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(UpperCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__UpperCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
| 10 | 1 |
'''simple docstring'''
def _lowercase ( ) -> list[list[int]]:
"""simple docstring"""
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
_a : int = generate_large_matrix()
_a : Tuple = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _lowercase ( lowerCamelCase__ ) -> None:
"""simple docstring"""
assert all(row == sorted(lowerCamelCase__ , reverse=lowerCamelCase__ ) for row in grid )
assert all(list(lowerCamelCase__ ) == sorted(lowerCamelCase__ , reverse=lowerCamelCase__ ) for col in zip(*lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : Optional[Any] = len(lowerCamelCase__ ) - 1
    # Edge cases: the row is empty, or it is negative from the very first value.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : int = (left + right) // 2
__UpperCAmelCase : Optional[int] = array[mid]
        # mid is the boundary when its value is negative and the previous value is non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Optional[int] = mid + 1
else:
__UpperCAmelCase : int = mid - 1
    # No negative numbers, so return the last index of the array + 1, which is its length.
return len(lowerCamelCase__ )
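# Hedged aside: a self-contained stdlib equivalent of the boundary search
# above (illustrative only; the helper name and import are not part of this
# row). A non-increasing row negates to a non-decreasing one, so bisect_right
# finds the first position holding a negative value.
from bisect import bisect_right
def _first_negative_index_sketch(row):
    # Returns len(row) when the row contains no negative number.
    return bisect_right([-v for v in row], 0)
# _first_negative_index_sketch([4, 3, 2, -1]) == 3; _first_negative_index_sketch([1, 1]) == 2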
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
__UpperCAmelCase : int = 0
__UpperCAmelCase : List[Any] = len(grid[0] )
for i in range(len(lowerCamelCase__ ) ):
__UpperCAmelCase : Optional[Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowerCamelCase__ ) * len(grid[0] )) - total
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = 0
for row in grid:
for i, number in enumerate(lowerCamelCase__ ):
if number < 0:
total += len(lowerCamelCase__ ) - i
break
return total
def _lowercase ( ) -> None:
"""simple docstring"""
from timeit import timeit
print("Running benchmarks" )
__UpperCAmelCase : Optional[Any] = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Any = timeit(f"""{func}(grid=grid)""" , setup=lowerCamelCase__ , number=500 )
print(f"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 10 | '''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_a : Tuple = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
_a : Dict = {
"Salesforce/codegen-350M-mono": 2048,
}
class __A (__magic_name__ ):
snake_case :Optional[Any] = VOCAB_FILES_NAMES
snake_case :str = PRETRAINED_VOCAB_FILES_MAP
snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Tuple = ["input_ids", "attention_mask"]
snake_case :Dict = CodeGenTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop("add_bos_token" , UpperCamelCase_ ):
__UpperCAmelCase : int = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space:
__UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) )
__UpperCAmelCase : Optional[int] = add_prefix_space
__UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = add_prefix_space
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : str = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
__UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
__UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : List[Any] = completion[: prints[1].start()]
__UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()]
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Dict = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
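# Hedged usage note (this mirrors the upstream Hugging Face API, which does
# accept the argument): ``decode`` takes ``truncate_before_pattern``, a list
# of regexes, and cuts generated code at the earliest match or at a second
# top-level ``def``/``print``, e.g.
#   tokenizer.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])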
| 10 | 1 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
if not sentence:
return ""
__UpperCAmelCase : List[str] = dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 10 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
_a : Tuple = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
_a : List[Any] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : Optional[Any] = bs[:]
__UpperCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Dict = [chr(lowerCamelCase__ ) for n in cs]
return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
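# Hedged note on the table above: it maps all 256 byte values one-to-one onto
# printable unicode code points (printable bytes map to themselves, the rest
# are shifted past 0x100), so byte-level BPE can round-trip arbitrary input
# losslessly.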
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : Dict = set()
__UpperCAmelCase : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Optional[Any] = char
return pairs
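# Hedged aside (self-contained illustration, not part of this row): the pair
# extraction above is equivalent to zipping a word with itself shifted by one,
# which the BPE loop then ranks and merges greedily.
def _adjacent_pairs_sketch(word):
    return set(zip(word, word[1:]))
# _adjacent_pairs_sketch(tuple("hello")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}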
class __A (__magic_name__ ):
snake_case :Optional[int] = VOCAB_FILES_NAMES
snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
__UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
__UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : int = json.load(UpperCamelCase_ )
__UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Any = errors # how to handle errors in decoding
__UpperCAmelCase : str = bytes_to_unicode()
__UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[str] = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = 0
while i < len(UpperCamelCase_ ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : str = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__UpperCAmelCase : int = get_pairs(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ )
__UpperCAmelCase : Dict = word
return word
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = []
for token in re.findall(self.pat , UpperCamelCase_ ):
__UpperCAmelCase : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) )
return bpe_tokens
def _snake_case ( self , UpperCamelCase_ ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , UpperCamelCase_ ):
return self.decoder.get(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = "".join(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : Any = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" )
__UpperCAmelCase : str = 0
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : str = token_index
writer.write(" ".join(UpperCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
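    # Hedged layout note: a single sequence is built as <s> A </s> and a pair as
    # <s> A </s></s> B </s>, the RoBERTa/BART convention that the special-tokens
    # mask and token-type methods below assume.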
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ):
__UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Tuple = " " + text
return (text, kwargs)
| 10 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A (__magic_name__ , unittest.TestCase ):
snake_case :Any = ShapEPipeline
snake_case :Tuple = ["prompt"]
snake_case :Dict = ["prompt"]
snake_case :Tuple = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
snake_case :Any = False
@property
def _snake_case ( self ):
return 32
@property
def _snake_case ( self ):
return 32
@property
def _snake_case ( self ):
return self.time_input_dim * 4
@property
def _snake_case ( self ):
return 8
@property
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(UpperCamelCase_ )
@property
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
__UpperCAmelCase : int = PriorTransformer(**UpperCamelCase_ )
return model
@property
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : int = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
__UpperCAmelCase : Optional[Any] = ShapERenderer(**UpperCamelCase_ )
return model
def _snake_case ( self ):
__UpperCAmelCase : Any = self.dummy_prior
__UpperCAmelCase : Tuple = self.dummy_text_encoder
__UpperCAmelCase : List[Any] = self.dummy_tokenizer
__UpperCAmelCase : Dict = self.dummy_renderer
__UpperCAmelCase : Optional[Any] = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , )
__UpperCAmelCase : Optional[int] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith("mps" ):
__UpperCAmelCase : Optional[Any] = torch.manual_seed(UpperCamelCase_ )
else:
__UpperCAmelCase : Any = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__UpperCAmelCase : List[str] = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = "cpu"
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Optional[int] = self.pipeline_class(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : int = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
__UpperCAmelCase : Any = output.images[0]
__UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__UpperCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ):
        # NOTE: larger batch sizes cause this test to time out, so we only test smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
__UpperCAmelCase : int = torch_device == "cpu"
__UpperCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def _snake_case ( self ):
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Optional[Any] = self.pipeline_class(**UpperCamelCase_ )
__UpperCAmelCase : int = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Dict = 1
__UpperCAmelCase : Optional[Any] = 2
__UpperCAmelCase : List[Any] = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
__UpperCAmelCase : Dict = batch_size * [inputs[key]]
__UpperCAmelCase : Any = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A (unittest.TestCase ):
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
__UpperCAmelCase : Tuple = ShapEPipeline.from_pretrained("openai/shap-e" )
__UpperCAmelCase : Dict = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
__UpperCAmelCase : Union[str, Any] = pipe(
"a shark" , generator=UpperCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 10 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : int = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __A (__magic_name__ ):
snake_case :Optional[int] = "speech_to_text_2"
snake_case :List[Any] = ["past_key_values"]
snake_case :str = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCamelCase_=1_00_00 , UpperCamelCase_=6 , UpperCamelCase_=20_48 , UpperCamelCase_=4 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=10_24 , **UpperCamelCase_ , ):
__UpperCAmelCase : Any = vocab_size
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Tuple = decoder_ffn_dim
__UpperCAmelCase : List[str] = decoder_layers
__UpperCAmelCase : str = decoder_attention_heads
__UpperCAmelCase : Dict = dropout
__UpperCAmelCase : Optional[Any] = attention_dropout
__UpperCAmelCase : int = activation_dropout
__UpperCAmelCase : Dict = activation_function
__UpperCAmelCase : Tuple = init_std
__UpperCAmelCase : Any = decoder_layerdrop
__UpperCAmelCase : str = use_cache
__UpperCAmelCase : int = decoder_layers
__UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCAmelCase : Union[str, Any] = max_target_positions
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
| 10 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __A (__magic_name__ ):
snake_case :Dict = (PNDMScheduler,)
snake_case :List[Any] = (("num_inference_steps", 50),)
def _snake_case ( self , **UpperCamelCase_ ):
__UpperCAmelCase : int = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**UpperCamelCase_ )
return config
def _snake_case ( self , UpperCamelCase_=0 , **UpperCamelCase_ ):
__UpperCAmelCase : Any = dict(self.forward_default_kwargs )
__UpperCAmelCase : Dict = kwargs.pop("num_inference_steps" , UpperCamelCase_ )
__UpperCAmelCase : str = self.dummy_sample
__UpperCAmelCase : Optional[Any] = 0.1 * sample
__UpperCAmelCase : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : List[Any] = self.get_scheduler_config(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals
__UpperCAmelCase : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_ )
__UpperCAmelCase : int = scheduler_class.from_pretrained(UpperCamelCase_ )
new_scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals
__UpperCAmelCase : Any = dummy_past_residuals[:]
__UpperCAmelCase : Any = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : Dict = new_scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase : List[str] = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : int = new_scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ):
pass
def _snake_case ( self , UpperCamelCase_=0 , **UpperCamelCase_ ):
__UpperCAmelCase : str = dict(self.forward_default_kwargs )
__UpperCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , UpperCamelCase_ )
__UpperCAmelCase : int = self.dummy_sample
__UpperCAmelCase : int = 0.1 * sample
__UpperCAmelCase : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : Optional[int] = self.get_scheduler_config()
__UpperCAmelCase : Optional[int] = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_ )
__UpperCAmelCase : List[Any] = scheduler_class.from_pretrained(UpperCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase_ )
            # copy over dummy past residuals (must be after setting timesteps)
__UpperCAmelCase : Optional[int] = dummy_past_residuals[:]
__UpperCAmelCase : str = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : str = new_scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase : Any = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : List[str] = new_scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self , **UpperCamelCase_ ):
__UpperCAmelCase : Dict = self.scheduler_classes[0]
__UpperCAmelCase : Optional[int] = self.get_scheduler_config(**UpperCamelCase_ )
__UpperCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = 10
__UpperCAmelCase : Optional[Any] = self.dummy_model()
__UpperCAmelCase : Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
__UpperCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__UpperCAmelCase : Any = model(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : List[Any] = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
return sample
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs )
__UpperCAmelCase : Optional[int] = kwargs.pop("num_inference_steps" , UpperCamelCase_ )
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : str = self.get_scheduler_config()
__UpperCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
__UpperCAmelCase : Any = self.dummy_sample
__UpperCAmelCase : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase_ , "set_timesteps" ):
scheduler.set_timesteps(UpperCamelCase_ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase_ , "set_timesteps" ):
__UpperCAmelCase : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCAmelCase : int = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
__UpperCAmelCase : Optional[Any] = dummy_past_residuals[:]
__UpperCAmelCase : Dict = scheduler.step_prk(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : int = scheduler.step_prk(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__UpperCAmelCase : Optional[int] = scheduler.step_plms(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : Union[str, Any] = scheduler.step_plms(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self ):
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def _snake_case ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase_ )
__UpperCAmelCase : str = self.scheduler_classes[0]
__UpperCAmelCase : str = self.get_scheduler_config(steps_offset=1 )
__UpperCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def _snake_case ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def _snake_case ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def _snake_case ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def _snake_case ( self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=UpperCamelCase_ )
def _snake_case ( self ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=UpperCamelCase_ )
def _snake_case ( self ):
        # an earlier version of set_timesteps() caused an indexing error on the alphas when the number of inference steps was a power of 3
__UpperCAmelCase : Dict = 27
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : Optional[Any] = self.dummy_sample
__UpperCAmelCase : int = 0.1 * sample
__UpperCAmelCase : Dict = self.get_scheduler_config()
__UpperCAmelCase : str = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
            # before the power-of-3 fix this would error on the first step, so we only need to run two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__UpperCAmelCase : int = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
def _snake_case ( self ):
with self.assertRaises(UpperCamelCase_ ):
__UpperCAmelCase : Tuple = self.scheduler_classes[0]
__UpperCAmelCase : List[str] = self.get_scheduler_config()
__UpperCAmelCase : Optional[Any] = scheduler_class(**UpperCamelCase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _snake_case ( self ):
__UpperCAmelCase : Any = self.full_loop()
__UpperCAmelCase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1E-3
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.full_loop(prediction_type="v_prediction" )
__UpperCAmelCase : Any = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1E-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1E-3
def _snake_case ( self ):
        # We specify a different beta so that the first alpha is 0.99
__UpperCAmelCase : str = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 )
__UpperCAmelCase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : List[str] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1E-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1E-3
def _snake_case ( self ):
        # We specify a different beta so that the first alpha is 0.99
__UpperCAmelCase : Dict = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 )
__UpperCAmelCase : Optional[int] = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : Tuple = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1E-3
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ = 100 ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = (n * (n + 1) // 2) ** 2
__UpperCAmelCase : Any = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
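# Hedged worked check for n = 100: (100 * 101 // 2) ** 2 - 100 * 101 * 201 // 6
# = 5050 ** 2 - 338350 = 25502500 - 338350 = 25164150, the Project Euler 6 answer.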
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_a : List[Any] = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
_a : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
__UpperCAmelCase : Tuple = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
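# Hedged worked example (figures chosen purely for illustration): at a 10%
# discount rate, cash flows [-1000, 500, 500, 500] give
#   -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3, which rounds to 243.43.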
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
__UpperCAmelCase : Dict = str(bin(lowerCamelCase__ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowerCamelCase__ ) )[2:]
__UpperCAmelCase : Optional[Any] = max(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowerCamelCase__ ) , b_binary.zfill(lowerCamelCase__ ) ) )
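# Hedged worked example for the function above: inputs 25 and 32 are padded to
# 0b011001 and 0b100000, whose bitwise OR yields the string "0b111001" (57).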
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_a : Union[str, Any] = HfApi()
_a : int = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
_a : Optional[Any] = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_a : List[str] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
_a : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_a : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_a : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_a : str = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | 1 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 10 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (__magic_name__ ):
snake_case :Any = "cvt"
def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_sizes
__UpperCAmelCase : List[str] = patch_stride
__UpperCAmelCase : Tuple = patch_padding
__UpperCAmelCase : int = embed_dim
__UpperCAmelCase : str = num_heads
__UpperCAmelCase : Any = depth
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : List[str] = attention_drop_rate
__UpperCAmelCase : Dict = drop_rate
__UpperCAmelCase : Dict = drop_path_rate
__UpperCAmelCase : str = qkv_bias
__UpperCAmelCase : Optional[int] = cls_token
__UpperCAmelCase : Optional[Any] = qkv_projection_method
__UpperCAmelCase : Tuple = kernel_qkv
__UpperCAmelCase : Optional[Any] = padding_kv
__UpperCAmelCase : Optional[int] = stride_kv
__UpperCAmelCase : Any = padding_q
__UpperCAmelCase : List[Any] = stride_q
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
| 10 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __A (unittest.TestCase ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=4 , ):
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : Optional[int] = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : Any = use_attention_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : Optional[int] = num_attention_heads
__UpperCAmelCase : Union[str, Any] = intermediate_size
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Dict = num_choices
def _snake_case ( self ):
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : int = None
if self.use_attention_mask:
__UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : str = None
if self.use_token_type_ids:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : int = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = config_and_inputs
__UpperCAmelCase : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def _snake_case ( self ):
__UpperCAmelCase : Any = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = config_and_inputs
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __A (__magic_name__ , unittest.TestCase ):
snake_case :Union[str, Any] = True
snake_case :List[str] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = FlaxRobertaModelTester(self )
@slow
def _snake_case ( self ):
for model_class_name in self.all_model_classes:
__UpperCAmelCase : str = model_class_name.from_pretrained("roberta-base" , from_pt=UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
| 10 | '''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> list[float]:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = coefficient_matrix.shape
__UpperCAmelCase , __UpperCAmelCase : Any = constant_matrix.shape
if rowsa != colsa:
__UpperCAmelCase : str = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(lowerCamelCase__ )
if colsa != 1:
__UpperCAmelCase : Optional[Any] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(lowerCamelCase__ )
if rowsa != rowsa:
__UpperCAmelCase : Optional[int] = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(lowerCamelCase__ )
if len(lowerCamelCase__ ) != rowsa:
__UpperCAmelCase : List[str] = (
"Number of initial values must be equal to number of rows in coefficient "
f"""matrix but received {len(lowerCamelCase__ )} and {rowsa}"""
)
raise ValueError(lowerCamelCase__ )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
__UpperCAmelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = table.shape
strictly_diagonally_dominant(lowerCamelCase__ )
    # Iterate over the whole augmented matrix for the given number of iterations
for _ in range(lowerCamelCase__ ):
__UpperCAmelCase : int = []
for row in range(lowerCamelCase__ ):
__UpperCAmelCase : List[str] = 0
for col in range(lowerCamelCase__ ):
if col == row:
__UpperCAmelCase : int = table[row][col]
elif col == cols - 1:
__UpperCAmelCase : Any = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
__UpperCAmelCase : List[Any] = (temp + val) / denom
new_val.append(lowerCamelCase__ )
__UpperCAmelCase : str = new_val
return [float(lowerCamelCase__ ) for i in new_val]
def _lowercase ( lowerCamelCase__ ) -> bool:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = table.shape
__UpperCAmelCase : str = True
for i in range(0 , lowerCamelCase__ ):
__UpperCAmelCase : Union[str, Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
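# Illustrative usage sketch (an addition, not part of the upstream file; the
# solver's unmasked name, jacobi_iteration_method, is an assumption): for the
# strictly diagonally dominant system 4x + y = 1, x + 3y = 2, i.e.
#   coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#   constant = np.array([[1.0], [2.0]])
# a first sweep from (0, 0) yields x = 1/4, y = 2/3, and repeated sweeps
# converge to the exact solution (1/11, 7/11) ~= (0.0909, 0.6364).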
| 10 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> None:
"""simple docstring"""
__UpperCAmelCase : str = len(lowerCamelCase__ )
print("The following activities are selected:" )
# The first activity is always selected
__UpperCAmelCase : int = 0
print(lowerCamelCase__ , end="," )
# Consider rest of the activities
for j in range(lowerCamelCase__ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(lowerCamelCase__ , end="," )
__UpperCAmelCase : Tuple = j
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : Any = [1, 3, 0, 5, 8, 5]
_a : Union[str, Any] = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
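# Illustrative note (an addition): with the start/finish lists above --
# already ordered by finish time, as the greedy selection requires -- the
# chosen activity indices are 0, 1, 3 and 4, so the script prints
# "The following activities are selected:" followed by "0,1,3,4,".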
| 10 | '''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
__UpperCAmelCase : Any = prime_factors(lowerCamelCase__ )
if is_square_free(lowerCamelCase__ ):
return -1 if len(lowerCamelCase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
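# Illustrative examples (an addition): this computes the Moebius function
# mu(n). Assuming prime_factors returns factors with multiplicity, e.g.
# prime_factors(12) == [2, 2, 3], we get mu(30) == -1 (square free, odd
# factor count), mu(10) == 1 (square free, even count) and mu(12) == 0
# (12 is divisible by the square 4).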
| 10 | 1 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __A (unittest.TestCase , __magic_name__ ):
def _snake_case ( self ):
__UpperCAmelCase : str = load_tool("text-classification" )
self.tool.setup()
__UpperCAmelCase : Any = load_tool("text-classification" , remote=UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : Any = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(UpperCamelCase_ , "positive" )
def _snake_case ( self ):
__UpperCAmelCase : str = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(UpperCamelCase_ , "positive" )
def _snake_case ( self ):
__UpperCAmelCase : List[str] = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(UpperCamelCase_ , "positive" )
def _snake_case ( self ):
__UpperCAmelCase : Any = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(UpperCamelCase_ , "positive" )
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
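# Illustrative note (an addition; the exact module path is an assumption):
# with this _LazyModule indirection, an import such as
#   from transformers.models.reformer import ReformerModel
# only pulls in the torch-dependent submodule on first attribute access,
# keeping the base `import transformers` cheap.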
| 10 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Any = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "ibert"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : str = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : Any = position_embedding_type
__UpperCAmelCase : Tuple = quant_mode
__UpperCAmelCase : Union[str, Any] = force_dequant
class __A (__magic_name__ ):
@property
def _snake_case ( self ):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
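# Illustrative usage sketch (an addition; upstream these classes are
# IBertConfig and IBertOnnxConfig, which is an assumption based on the file):
# a quantization-aware configuration would be built roughly as
#   config = IBertConfig(quant_mode=True, force_dequant="none")
# and it round-trips through to_dict()/from_dict() like any PretrainedConfig.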
| 10 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Any = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "ibert"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : str = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : Any = position_embedding_type
__UpperCAmelCase : Tuple = quant_mode
__UpperCAmelCase : Union[str, Any] = force_dequant
class __A (__magic_name__ ):
@property
def _snake_case ( self ):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 10 | 1 |
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_a : List[Any] = True
from torch.cuda.amp import autocast
_a : int = logging.getLogger(__name__)
def _lowercase ( lowerCamelCase__=None , lowerCamelCase__=None ) -> Optional[Any]:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=lowerCamelCase__ )
@dataclass
class __A :
snake_case :str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
snake_case :Optional[bool] = field(
default=__magic_name__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
snake_case :Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
snake_case :Optional[float] = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
snake_case :Optional[float] = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
snake_case :Optional[float] = field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
snake_case :Optional[float] = field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
snake_case :Optional[float] = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class __A :
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
snake_case :Optional[str] = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
snake_case :bool = field(
default=__magic_name__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
snake_case :Optional[int] = field(
default=__magic_name__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
snake_case :Optional[int] = field(
default=__magic_name__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
snake_case :Optional[int] = field(
default=__magic_name__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
snake_case :List[str] = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class __A :
snake_case :WavaVecaProcessor
snake_case :Union[bool, str] = True
snake_case :Optional[int] = None
snake_case :Optional[int] = None
snake_case :Optional[int] = None
snake_case :Optional[int] = None
def __call__( self , UpperCamelCase_ ):
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
__UpperCAmelCase : List[Any] = [{"input_values": feature["input_values"]} for feature in features]
__UpperCAmelCase : Optional[Any] = [{"input_ids": feature["labels"]} for feature in features]
__UpperCAmelCase : int = self.processor.pad(
UpperCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
__UpperCAmelCase : Optional[Any] = self.processor.pad(
labels=UpperCamelCase_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
# replace padding with -100 to ignore loss correctly
__UpperCAmelCase : Union[str, Any] = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
__UpperCAmelCase : Dict = labels
return batch
class __A (__magic_name__ ):
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
model.train()
__UpperCAmelCase : List[Any] = self._prepare_inputs(UpperCamelCase_ )
if self.use_amp:
with autocast():
__UpperCAmelCase : Optional[int] = self.compute_loss(UpperCamelCase_ , UpperCamelCase_ )
else:
__UpperCAmelCase : Any = self.compute_loss(UpperCamelCase_ , UpperCamelCase_ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__UpperCAmelCase : Any = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__UpperCAmelCase : Dict = loss.sum() / (inputs["labels"] >= 0).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
__UpperCAmelCase : List[str] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(UpperCamelCase_ ).backward()
elif self.use_apex:
with amp.scale_loss(UpperCamelCase_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(UpperCamelCase_ )
else:
loss.backward()
return loss.detach()
def _lowercase ( ) -> int:
"""simple docstring"""
__UpperCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__UpperCAmelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , lowerCamelCase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__UpperCAmelCase : str = datasets.load_dataset(
"common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
__UpperCAmelCase : Dict = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )
# Create and save tokenizer
__UpperCAmelCase : Dict = f"""[{''.join(data_args.chars_to_ignore )}]"""
def remove_special_characters(lowerCamelCase__ ):
__UpperCAmelCase : Tuple = re.sub(lowerCamelCase__ , "" , batch["sentence"] ).lower() + " "
return batch
__UpperCAmelCase : Optional[Any] = train_dataset.map(lowerCamelCase__ , remove_columns=["sentence"] )
__UpperCAmelCase : str = eval_dataset.map(lowerCamelCase__ , remove_columns=["sentence"] )
def extract_all_chars(lowerCamelCase__ ):
__UpperCAmelCase : List[Any] = " ".join(batch["text"] )
__UpperCAmelCase : int = list(set(lowerCamelCase__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
__UpperCAmelCase : List[str] = train_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=-1 , keep_in_memory=lowerCamelCase__ , remove_columns=train_dataset.column_names , )
__UpperCAmelCase : List[str] = train_dataset.map(
lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=-1 , keep_in_memory=lowerCamelCase__ , remove_columns=eval_dataset.column_names , )
__UpperCAmelCase : Optional[Any] = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
__UpperCAmelCase : List[str] = {v: k for k, v in enumerate(lowerCamelCase__ )}
__UpperCAmelCase : List[str] = vocab_dict[" "]
del vocab_dict[" "]
__UpperCAmelCase : int = len(lowerCamelCase__ )
__UpperCAmelCase : List[str] = len(lowerCamelCase__ )
with open("vocab.json" , "w" ) as vocab_file:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase : Union[str, Any] = WavaVecaCTCTokenizer(
"vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
__UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0.0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ )
__UpperCAmelCase : int = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
__UpperCAmelCase : Any = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__UpperCAmelCase : List[Any] = min(len(lowerCamelCase__ ) , data_args.max_train_samples )
__UpperCAmelCase : Union[str, Any] = train_dataset.select(range(lowerCamelCase__ ) )
if data_args.max_val_samples is not None:
__UpperCAmelCase : Union[str, Any] = eval_dataset.select(range(data_args.max_val_samples ) )
__UpperCAmelCase : Dict = torchaudio.transforms.Resample(4_8000 , 1_6000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(lowerCamelCase__ ):
__UpperCAmelCase , __UpperCAmelCase : Tuple = torchaudio.load(batch["path"] )
__UpperCAmelCase : List[Any] = resampler(lowerCamelCase__ ).squeeze().numpy()
__UpperCAmelCase : Optional[Any] = 1_6000
__UpperCAmelCase : Optional[Any] = batch["text"]
return batch
__UpperCAmelCase : Union[str, Any] = train_dataset.map(
lowerCamelCase__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__UpperCAmelCase : str = eval_dataset.map(
lowerCamelCase__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(lowerCamelCase__ ):
# check that all files have the correct sampling rate
assert (
len(set(batch["sampling_rate"] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
__UpperCAmelCase : Optional[int] = processor(
audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
batch.update(lowerCamelCase__ )
return batch
__UpperCAmelCase : List[Any] = train_dataset.map(
lowerCamelCase__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , )
__UpperCAmelCase : Union[str, Any] = eval_dataset.map(
lowerCamelCase__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCamelCase__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
__UpperCAmelCase : Dict = datasets.load_metric("wer" )
def compute_metrics(lowerCamelCase__ ):
__UpperCAmelCase : Dict = pred.predictions
__UpperCAmelCase : Optional[Any] = np.argmax(lowerCamelCase__ , axis=-1 )
__UpperCAmelCase : Optional[Any] = processor.tokenizer.pad_token_id
__UpperCAmelCase : int = processor.batch_decode(lowerCamelCase__ )
# we do not want to group tokens when computing the metrics
__UpperCAmelCase : Tuple = processor.batch_decode(pred.label_ids , group_tokens=lowerCamelCase__ )
__UpperCAmelCase : str = wer_metric.compute(predictions=lowerCamelCase__ , references=lowerCamelCase__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__UpperCAmelCase : Any = DataCollatorCTCWithPadding(processor=lowerCamelCase__ , padding=lowerCamelCase__ )
# Initialize our Trainer
__UpperCAmelCase : Any = CTCTrainer(
model=lowerCamelCase__ , data_collator=lowerCamelCase__ , args=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__UpperCAmelCase : Tuple = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__UpperCAmelCase : Any = model_args.model_name_or_path
else:
__UpperCAmelCase : Any = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__UpperCAmelCase : Dict = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
trainer.save_model()
__UpperCAmelCase : List[str] = train_result.metrics
__UpperCAmelCase : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ )
)
__UpperCAmelCase : Any = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics("train" , lowerCamelCase__ )
trainer.save_metrics("train" , lowerCamelCase__ )
trainer.save_state()
# Evaluation
__UpperCAmelCase : str = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCAmelCase : Dict = trainer.evaluate()
__UpperCAmelCase : int = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCamelCase__ )
__UpperCAmelCase : List[Any] = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics("eval" , lowerCamelCase__ )
trainer.save_metrics("eval" , lowerCamelCase__ )
return results
if __name__ == "__main__":
main()
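# Illustrative invocation sketch (an addition; the script filename and exact
# flag spellings are assumptions based on the dataclasses above):
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-large-xlsr-turkish \
#       --freeze_feature_extractor \
#       --do_train --do_eval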
| 10 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : str = HfArgumentParser(lowerCamelCase__ )
__UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()[0]
__UpperCAmelCase : Any = TensorFlowBenchmark(args=lowerCamelCase__ )
try:
__UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__UpperCAmelCase : str = "Arg --no_{0} is no longer used, please use --no-{0} instead."
__UpperCAmelCase : Tuple = " ".join(str(lowerCamelCase__ ).split(" " )[:-1] )
__UpperCAmelCase : Any = ""
__UpperCAmelCase : List[Any] = eval(str(lowerCamelCase__ ).split(" " )[-1] )
__UpperCAmelCase : Optional[int] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
__UpperCAmelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(lowerCamelCase__ )
raise ValueError(lowerCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
| 10 | 1 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
_a : Dict = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 10 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline
snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case :Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case :Any = frozenset([] )
snake_case :Optional[int] = True
@property
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[str] = (16, 16)
__UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
__UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" )
__UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , )
__UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ )
__UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith("mps" ):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__UpperCAmelCase : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
__UpperCAmelCase : List[str] = "cpu"
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
__UpperCAmelCase : Tuple = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
__UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def _snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _snake_case ( self ):
__UpperCAmelCase : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : List[str] = 2
__UpperCAmelCase : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers in the skip list (those without a usable sigma
                # schedule) are not supported by this pipeline
continue
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , scheduler_enum.name )
__UpperCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
__UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ )[0]
outputs.append(UpperCamelCase_ )
assert check_same_shape(UpperCamelCase_ )
@require_torch_gpu
@slow
class __A (unittest.TestCase ):
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = torch.manual_seed(33 )
__UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
__UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images
__UpperCAmelCase : int = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = torch.manual_seed(33 )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
__UpperCAmelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
__UpperCAmelCase : Dict = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=12 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=0.0_2 , UpperCamelCase_=0 , UpperCamelCase_=None , ):
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[Any] = projection_dim
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : List[str] = num_attention_heads
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[str] = dropout
__UpperCAmelCase : Union[str, Any] = attention_dropout
__UpperCAmelCase : Any = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[Any] = scope
__UpperCAmelCase : str = bos_token_id
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : List[str] = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
__UpperCAmelCase : List[Any] = input_mask.numpy()
__UpperCAmelCase , __UpperCAmelCase : List[Any] = input_mask.shape
__UpperCAmelCase : Tuple = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase_ ):
__UpperCAmelCase : Any = 1
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[str] = self.get_config()
return config, input_ids, tf.convert_to_tensor(UpperCamelCase_ )
def _snake_case ( self ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : str = TFBlipTextModel(config=UpperCamelCase_ )
__UpperCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , training=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __A (__magic_name__ , unittest.TestCase ):
snake_case :Optional[Any] = (TFBlipTextModel,) if is_tf_available() else ()
snake_case :Any = False
snake_case :Dict = False
snake_case :Dict = False
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self )
__UpperCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def _snake_case ( self ):
pass
def _snake_case ( self ):
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def _snake_case ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def _snake_case ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def _snake_case ( self ):
pass
@slow
def _snake_case ( self ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Tuple = TFBlipTextModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=UpperCamelCase_ )
| 10 | '''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ):
super().__init__(features=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and column:
if all(
isinstance(UpperCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , (str, bytes, type(UpperCamelCase_ )) ):
return value
elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__UpperCAmelCase : int = {}
if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__UpperCAmelCase : Optional[int] = {"dtype": torch.intaa}
elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__UpperCAmelCase : str = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
__UpperCAmelCase : str = np.asarray(UpperCamelCase_ )
return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _snake_case ( self , UpperCamelCase_ ):
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCamelCase_ , "__array__" ) and not isinstance(UpperCamelCase_ , torch.Tensor ):
__UpperCAmelCase : Dict = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase_ , np.ndarray ):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ )
return self.recursive_tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] )
__UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ )
__UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ )
__UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ )
for column_name in batch:
__UpperCAmelCase : Tuple = self._consolidate(batch[column_name] )
return batch
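# Illustrative usage sketch (an addition): this formatter is what backs the
# "torch" format in the public datasets API, so it is usually reached via
#   ds = datasets.Dataset.from_dict({"x": [[1, 2], [3, 4]]})
#   ds = ds.with_format("torch")
#   ds[0]["x"]  # -> tensor([1, 2]) with an integer dtype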
| 10 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __A (unittest.TestCase ):
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = 0
def _snake_case ( self ):
__UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Optional[Any] = Path(UpperCamelCase_ ) / "preprocessor_config.json"
__UpperCAmelCase : Optional[Any] = Path(UpperCamelCase_ ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(UpperCamelCase_ , "w" ) , )
json.dump({"model_type": "clip"} , open(UpperCamelCase_ , "w" ) )
__UpperCAmelCase : Any = AutoImageProcessor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Optional[int] = Path(UpperCamelCase_ ) / "preprocessor_config.json"
__UpperCAmelCase : Optional[Any] = Path(UpperCamelCase_ ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(UpperCamelCase_ , "w" ) , )
json.dump({"model_type": "clip"} , open(UpperCamelCase_ , "w" ) )
__UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : str = CLIPConfig()
            # Create a dummy config file with image_processor_type
__UpperCAmelCase : Union[str, Any] = Path(UpperCamelCase_ ) / "preprocessor_config.json"
__UpperCAmelCase : List[Any] = Path(UpperCamelCase_ ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(UpperCamelCase_ , "w" ) , )
json.dump({"model_type": "clip"} , open(UpperCamelCase_ , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(UpperCamelCase_ ).to_dict()
config_dict.pop("image_processor_type" )
__UpperCAmelCase : Optional[Any] = CLIPImageProcessor(**UpperCamelCase_ )
# save in new folder
model_config.save_pretrained(UpperCamelCase_ )
config.save_pretrained(UpperCamelCase_ )
__UpperCAmelCase : str = AutoImageProcessor.from_pretrained(UpperCamelCase_ )
# make sure private variable is not incorrectly saved
__UpperCAmelCase : Dict = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : str = Path(UpperCamelCase_ ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(UpperCamelCase_ , "w" ) , )
__UpperCAmelCase : Any = AutoImageProcessor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
with self.assertRaisesRegex(
UpperCamelCase_ , "clip-base is not a local folder and is not a valid model identifier" ):
__UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("clip-base" )
def _snake_case ( self ):
with self.assertRaisesRegex(
UpperCamelCase_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(UpperCamelCase_ , revision="aaaaaa" )
def _snake_case ( self ):
with self.assertRaisesRegex(
UpperCamelCase_ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
__UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
__UpperCAmelCase : List[str] = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=UpperCamelCase_ )
__UpperCAmelCase : Tuple = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase_ )
__UpperCAmelCase : int = AutoImageProcessor.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _snake_case ( self ):
try:
AutoConfig.register("custom" , UpperCamelCase_ )
AutoImageProcessor.register(UpperCamelCase_ , UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoImageProcessor.register(UpperCamelCase_ , UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Union[str, Any] = Path(UpperCamelCase_ ) / "preprocessor_config.json"
__UpperCAmelCase : Dict = Path(UpperCamelCase_ ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(UpperCamelCase_ , "w" ) , )
json.dump({"model_type": "clip"} , open(UpperCamelCase_ , "w" ) )
__UpperCAmelCase : Dict = CustomImageProcessor.from_pretrained(UpperCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ):
class __A (__magic_name__ ):
snake_case :Tuple = True
try:
AutoConfig.register("custom" , UpperCamelCase_ )
AutoImageProcessor.register(UpperCamelCase_ , UpperCamelCase_ )
# If remote code is not set, the default is to use local
__UpperCAmelCase : str = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(UpperCamelCase_ , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
# Color current vertex
__UpperCAmelCase : List[str] = i
# Validate coloring
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
return True
# Backtrack
__UpperCAmelCase : Any = -1
return False
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> list[int]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
return colored_vertices
return []
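# Illustrative worked example (an addition; the unmasked name of the
# two-argument entry point, color(graph, max_colors), is an assumption):
# for the adjacency matrix of a 4-cycle,
#   graph = [[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]]
# two colors suffice and the backtracking search returns [0, 1, 0, 1], while
# a triangle with max_colors=2 has no valid coloring and returns [].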
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class __A (__magic_name__ ):
snake_case :str
snake_case :int
def _lowercase ( lowerCamelCase__ ) -> list[str]:
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(lowerCamelCase__ ) )]
def _lowercase ( lowerCamelCase__ ) -> BWTTransformDict:
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
__UpperCAmelCase : Union[str, Any] = all_rotations(lowerCamelCase__ )
    rotations.sort() # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
__UpperCAmelCase : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(lowerCamelCase__ ),
}
return response
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> str:
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
__UpperCAmelCase : Tuple = int(lowerCamelCase__ )
except ValueError:
        raise TypeError(
            "The parameter idx_original_string must be an int or castable"
            " to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(lowerCamelCase__ ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
__UpperCAmelCase : List[str] = [""] * len(lowerCamelCase__ )
for _ in range(len(lowerCamelCase__ ) ):
for i in range(len(lowerCamelCase__ ) ):
__UpperCAmelCase : Optional[Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
_a : List[Any] = "Provide a string that I will generate its BWT transform: "
_a : Union[str, Any] = input(entry_msg).strip()
_a : Union[str, Any] = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result['bwt_string']}'"""
)
_a : Optional[Any] = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
f"""we get original string '{original_string}'"""
)
| 10 | '''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at ``position`` of ``number`` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at ``position`` of ``number``."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Check whether the bit at ``position`` of ``number`` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at ``position`` of ``number``."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
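
    # Small combined check (an illustrative addition, not part of the original
    # module): build a bitmask step by step with the helpers above.
    mask = set_bit(0, 3)  # 0b1000
    mask = flip_bit(mask, 0)  # 0b1001
    assert is_bit_set(mask, 3) and get_bit(mask, 0) == 1
    assert clear_bit(mask, 3) == 0b0001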
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, left subtree, right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree: number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal, level by level."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Values of all nodes on the given level, read left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Values of all nodes on the given level, read right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate the reading direction on every level."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
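
# For contrast (an illustrative addition, not part of the original module): the
# same in-order traversal without recursion, using an explicit stack.
def inorder_iterative(root: Node | None) -> list[int]:
    stack: list[Node] = []
    output: list[int] = []
    node = root
    while stack or node:
        # Walk as far left as possible, remembering the path.
        while node:
            stack.append(node)
            node = node.left
        # Visit the node, then explore its right subtree.
        node = stack.pop()
        output.append(node.data)
        node = node.right
    return output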
| 10 | '''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b) -> float:
    """
    Gives the Euclidean distance between two points.
    """
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """
    Classifies the point using the KNN algorithm: majority vote among the k
    training points closest to it.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
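    # Cross-check against scikit-learn's built-in k-NN (an illustrative
    # addition, not part of the original script); both should usually agree.
    from sklearn.neighbors import KNeighborsClassifier

    knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
    print(classes[knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]])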
| 10 | 1 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 10 | '''simple docstring'''
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using union by rank; return False if already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
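
# Usage sketch (an illustrative addition, not part of the original class):
# start from three singleton sets and merge until one set holds all items.
if __name__ == "__main__":
    disjoint_set = DisjointSet([1, 1, 1])
    disjoint_set.merge(0, 1)
    disjoint_set.merge(1, 2)
    print(disjoint_set.max_set)  # 3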
| 10 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | '''simple docstring'''
def method_1(boundary, steps):
    """Trapezoidal rule: h/2 * (f(x0) + 2*f(x1) + ... + 2*f(x_{n-1}) + f(xn))."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ... up to b - h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
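    # Illustrative convergence check (an addition, not part of the original
    # file): the exact integral of x**2 over [0, 1] is 1/3, so the estimate
    # should approach 0.3333... as the step count grows.
    for extra_steps in (100.0, 1000.0):
        print(f"steps={extra_steps}: y = {method_1([0.0, 1.0], extra_steps)}")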
| 10 | 1 |
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of unique prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True when all elements of the iterable are equal (or it is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers with n unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers
    that each have n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
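    # Sanity check (an illustrative addition): the first pair of consecutive
    # integers with two distinct prime factors each is (14, 15) = (2*7, 3*5).
    assert solution(2) == 14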
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    """True if n reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    """Add n to its digit-reversal."""
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count the candidate Lychrel numbers below limit, allowing at most 50
    reverse-and-add iterations per number."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ):
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCAmelCase : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
__UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
__UpperCAmelCase : List[str] = self.__dict__.copy()
__UpperCAmelCase : str = None
__UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Dict = [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , UpperCamelCase_ ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , UpperCamelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 10 | 1 |
'''simple docstring'''
_a : str = "Input must be a string of 8 numbers plus letter"
_a : List[str] = "TRWAGMYFPDXBNJZSQVHLCKE"
def _lowercase ( lowerCamelCase__ ) -> bool:
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__UpperCAmelCase : Dict = f"""Expected string as input, found {type(lowerCamelCase__ ).__name__}"""
raise TypeError(lowerCamelCase__ )
__UpperCAmelCase : Tuple = spanish_id.replace("-" , "" ).upper()
if len(lowerCamelCase__ ) != 9:
raise ValueError(lowerCamelCase__ )
try:
__UpperCAmelCase : Dict = int(spanish_id_clean[0:8] )
__UpperCAmelCase : Dict = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowerCamelCase__ ) from ex
if letter.isdigit():
raise ValueError(lowerCamelCase__ )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | '''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
| 10 | 1 |
'''simple docstring'''
import os
def _lowercase ( lowerCamelCase__ = "matrix.txt" ) -> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) as in_file:
__UpperCAmelCase : str = in_file.read()
__UpperCAmelCase : Union[str, Any] = [[int(lowerCamelCase__ ) for cell in row.split("," )] for row in data.strip().splitlines()]
__UpperCAmelCase : Any = [[0 for cell in row] for row in grid]
__UpperCAmelCase : List[str] = len(grid[0] )
__UpperCAmelCase : Any = [[0 for i in range(lowerCamelCase__ )] for j in range(lowerCamelCase__ )]
__UpperCAmelCase : str = grid[0][0]
for i in range(1 , lowerCamelCase__ ):
__UpperCAmelCase : int = grid[0][i] + dp[0][i - 1]
for i in range(1 , lowerCamelCase__ ):
__UpperCAmelCase : Optional[Any] = grid[i][0] + dp[i - 1][0]
for i in range(1 , lowerCamelCase__ ):
for j in range(1 , lowerCamelCase__ ):
__UpperCAmelCase : Any = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(f"""{solution() = }""")
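
# Same DP on an inline grid (an illustrative addition, not part of the original
# solution): for the classic 3x3 grid below the cheapest right/down path costs
# 1 + 3 + 1 + 1 + 1 = 7.
def _inline_demo() -> int:
    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    n = len(grid)
    dp = [[0] * n for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]  # 7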
| 10 | '''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop("add_bos_token" , UpperCamelCase_ ):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space:
__UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) )
__UpperCAmelCase : Optional[int] = add_prefix_space
__UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = add_prefix_space
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : str = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
__UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
__UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : List[Any] = completion[: prints[1].start()]
__UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()]
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Dict = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
| 10 | 1 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"""Queue({tuple(self._stack2[::-1] + self._stack1)})"""

    def put(self, item: _T) -> None:
        """Enqueue in O(1): push onto the input stack."""
        self._stack1.append(item)

    def get(self) -> _T:
        """Dequeue in amortized O(1): refill the output stack only when it is empty."""
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
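
    # FIFO check (an illustrative addition, not part of the original module):
    # order is preserved even though the queue is built from two LIFO stacks.
    queue = QueueByTwoStacks([1, 2])
    queue.put(3)
    print(queue.get(), queue.get(), queue.get())  # 1 2 3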
| 10 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, so that
    byte-level BPE never has to operate on whitespace or control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where a word is
    represented as a tuple of symbols (variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
__UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : int = json.load(UpperCamelCase_ )
__UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Any = errors # how to handle errors in decoding
__UpperCAmelCase : str = bytes_to_unicode()
__UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[str] = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = 0
while i < len(UpperCamelCase_ ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : str = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__UpperCAmelCase : int = get_pairs(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ )
__UpperCAmelCase : Dict = word
return word
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = []
for token in re.findall(self.pat , UpperCamelCase_ ):
__UpperCAmelCase : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) )
return bpe_tokens
def _snake_case ( self , UpperCamelCase_ ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , UpperCamelCase_ ):
return self.decoder.get(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = "".join(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : Any = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" )
__UpperCAmelCase : str = 0
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : str = token_index
writer.write(" ".join(UpperCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ):
__UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Tuple = " " + text
return (text, kwargs)
| 10 | 1 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Update function if value of any node in min-heap decreases."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Prim's algorithm: grow a minimum spanning tree outward from vertex 0."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
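
# Non-interactive usage sketch (an illustrative addition, not part of the
# original script): a weighted triangle plus one pendant vertex; the MST keeps
# the two cheapest triangle edges and the pendant edge.
def _demo_prim() -> None:
    adjacency = {
        0: [[1, 1], [2, 3]],
        1: [[0, 1], [2, 2], [3, 4]],
        2: [[0, 3], [1, 2]],
        3: [[1, 4]],
    }
    print(prisms_algorithm(adjacency))  # e.g. [(0, 1), (1, 2), (1, 3)]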
| 10 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 10 | 1 |
'''simple docstring'''
| 10 | '''simple docstring'''
def solution(n: int = 100) -> int:
    """
    Difference between the square of the sum (equal to the sum of cubes, by the
    identity (n(n+1)/2)**2) and the sum of the squares of the first n naturals.
    """
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
for param in module.parameters():
__UpperCAmelCase : List[Any] = False
def _lowercase ( ) -> str:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__UpperCAmelCase : List[str] = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
__UpperCAmelCase : List[str] = plt.imshow(lowerCamelCase__ )
fig.axes.get_xaxis().set_visible(lowerCamelCase__ )
fig.axes.get_yaxis().set_visible(lowerCamelCase__ )
plt.show()
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = datetime.now()
__UpperCAmelCase : int = current_time.strftime("%H:%M:%S" )
return timestamp
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
__UpperCAmelCase : Tuple = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
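    # Added illustration (not in the original): net present value of a 100-unit
    # outlay followed by two 60-unit inflows at a 10% discount rate, computed
    # the same way as the function above, i.e. sum(cf / (1 + rate) ** i).
    assert round(sum(cf / 1.1**i for i, cf in enumerate([-100, 60, 60])), 2) == 4.13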
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ ) -> list[int]:
"""simple docstring"""
if len(lowerCamelCase__ ) == 0:
return array
__UpperCAmelCase , __UpperCAmelCase : str = min(lowerCamelCase__ ), max(lowerCamelCase__ )
# Compute the variables
__UpperCAmelCase : Any = _max - _min + 1
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
__UpperCAmelCase : Optional[Any] = i - _min
__UpperCAmelCase : List[Any] = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
__UpperCAmelCase : Optional[Any] = 0
for i in range(lowerCamelCase__ ):
while holes_repeat[i] > 0:
__UpperCAmelCase : int = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
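# Added worked example (not in the original): for [8, 3, 2, 7, 4], _min is 2 and
# _max is 8, so 7 holes cover the values 2..8; counting each value into
# holes[value - _min] and reading the holes back in order yields [2, 3, 4, 7, 8].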
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : Optional[int] = input("Enter numbers separated by comma:\n")
_a : str = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 10 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_a : Union[str, Any] = HfApi()
_a : int = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
    1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251
])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
_a : Optional[Any] = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_a : List[str] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
_a : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_a : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_a : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_a : str = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Optional[int] = StableUnCLIPPipeline
snake_case :Optional[Any] = TEXT_TO_IMAGE_PARAMS
snake_case :List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case :Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case :Any = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
snake_case :Optional[int] = False
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = 32
__UpperCAmelCase : Any = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__UpperCAmelCase : Dict = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=UpperCamelCase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase : str = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=UpperCamelCase_ , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=UpperCamelCase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__UpperCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCamelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCamelCase_ , layers_per_block=1 , upcast_attention=UpperCamelCase_ , use_linear_projection=UpperCamelCase_ , )
torch.manual_seed(0 )
__UpperCAmelCase : int = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="v_prediction" , set_alpha_to_one=UpperCamelCase_ , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = AutoencoderKL()
__UpperCAmelCase : int = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith("mps" ):
__UpperCAmelCase : List[Any] = torch.manual_seed(UpperCamelCase_ )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase_ )
@slow
@require_torch_gpu
class __A (unittest.TestCase ):
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
__UpperCAmelCase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
__UpperCAmelCase : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase : int = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCAmelCase : Tuple = pipe("anime turle" , generator=UpperCamelCase_ , output_type="np" )
__UpperCAmelCase : Tuple = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase : List[Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
__UpperCAmelCase : int = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase : Optional[int] = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
__UpperCAmelCase : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 10 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (__magic_name__ ):
snake_case :Any = "cvt"
def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_sizes
__UpperCAmelCase : List[str] = patch_stride
__UpperCAmelCase : Tuple = patch_padding
__UpperCAmelCase : int = embed_dim
__UpperCAmelCase : str = num_heads
__UpperCAmelCase : Any = depth
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : List[str] = attention_drop_rate
__UpperCAmelCase : Dict = drop_rate
__UpperCAmelCase : Dict = drop_path_rate
__UpperCAmelCase : str = qkv_bias
__UpperCAmelCase : Optional[int] = cls_token
__UpperCAmelCase : Optional[Any] = qkv_projection_method
__UpperCAmelCase : Tuple = kernel_qkv
__UpperCAmelCase : Optional[Any] = padding_kv
__UpperCAmelCase : Optional[int] = stride_kv
__UpperCAmelCase : Any = padding_q
__UpperCAmelCase : List[Any] = stride_q
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
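# Added note (not in the original): the list-valued arguments above are
# per-stage settings; the defaults (embed_dim [64, 192, 384], depth [1, 2, 10],
# num_heads [1, 3, 6]) describe the three-stage layout of the CvT-13 checkpoint
# referenced in the archive map.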
| 10 | 1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : Optional[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_a : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
__UpperCAmelCase : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
__UpperCAmelCase : List[str] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
__UpperCAmelCase : str = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__UpperCAmelCase : List[str] = value
elif weight_type == "weight_g":
__UpperCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__UpperCAmelCase : Dict = value
elif weight_type == "bias":
__UpperCAmelCase : int = value
else:
__UpperCAmelCase : int = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : str = []
__UpperCAmelCase : List[Any] = fairseq_model.state_dict()
__UpperCAmelCase : Union[str, Any] = hf_model.feature_extractor
__UpperCAmelCase : Dict = hf_model.adapter
for name, value in fairseq_dict.items():
__UpperCAmelCase : str = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == "group" , )
__UpperCAmelCase : int = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
load_adapter(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase : int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__UpperCAmelCase : Union[str, Any] = True
if "*" in mapped_key:
__UpperCAmelCase : int = name.split(lowerCamelCase__ )[0].split("." )[-2]
__UpperCAmelCase : Any = mapped_key.replace("*" , lowerCamelCase__ )
if "weight_g" in name:
__UpperCAmelCase : Dict = "weight_g"
elif "weight_v" in name:
__UpperCAmelCase : Optional[int] = "weight_v"
elif "bias" in name:
__UpperCAmelCase : Union[str, Any] = "bias"
elif "weight" in name:
__UpperCAmelCase : Optional[Any] = "weight"
else:
__UpperCAmelCase : Dict = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : List[Any] = full_name.split("conv_layers." )[-1]
__UpperCAmelCase : Dict = name.split("." )
__UpperCAmelCase : List[Any] = int(items[0] )
__UpperCAmelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__UpperCAmelCase : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__UpperCAmelCase : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__UpperCAmelCase : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__UpperCAmelCase : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : Tuple = full_name.split("adaptor." )[-1]
__UpperCAmelCase : Optional[int] = name.split("." )
if items[1].isdigit():
__UpperCAmelCase : Union[str, Any] = int(items[1] )
else:
__UpperCAmelCase : Optional[int] = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
__UpperCAmelCase : Optional[Any] = value
logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
                __UpperCAmelCase : List[Any] = value
                logger.info(f"""Adapter proj layer norm weight was initialized from {full_name}.""" )
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
__UpperCAmelCase : str = value
logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
__UpperCAmelCase : List[str] = value
logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
__UpperCAmelCase : List[Any] = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
__UpperCAmelCase : Dict = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = emb.weight.shape
__UpperCAmelCase : str = nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ )
__UpperCAmelCase : Dict = emb.weight.data
return lin_layer
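# Added note (not in the original): the helper above is meant to turn an
# embedding matrix of shape (vocab_size, emb_dim) into a bias-free nn.Linear
# sharing the same weight tensor, i.e. a weight-tied language-modeling head.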
@torch.no_grad()
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : List[Any] = WavaVecaConfig.from_pretrained(
lowerCamelCase__ , add_adapter=lowerCamelCase__ , adapter_stride=lowerCamelCase__ , adapter_kernel_size=lowerCamelCase__ , use_auth_token=lowerCamelCase__ , output_hidden_size=lowerCamelCase__ , )
__UpperCAmelCase : int = MBartConfig.from_pretrained(lowerCamelCase__ )
# load model
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/" )[:-1] ),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
__UpperCAmelCase : int = model[0].eval()
# load feature extractor
__UpperCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase__ , use_auth_token=lowerCamelCase__ )
# set weights for wav2vec2 encoder
__UpperCAmelCase : Tuple = WavaVecaModel(lowerCamelCase__ )
recursively_load_weights_wavaveca(model.encoder , lowerCamelCase__ )
# load decoder weights
__UpperCAmelCase : List[Any] = MBartForCausalLM(lowerCamelCase__ )
__UpperCAmelCase , __UpperCAmelCase : Dict = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCamelCase__ )
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__UpperCAmelCase : int = SpeechEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Union[str, Any] = MBartaaTokenizer(lowerCamelCase__ )
tokenizer.save_pretrained(lowerCamelCase__ )
__UpperCAmelCase : Union[str, Any] = hf_wavavec.config.to_dict()
__UpperCAmelCase : int = tokenizer.pad_token_id
__UpperCAmelCase : Union[str, Any] = tokenizer.bos_token_id
__UpperCAmelCase : Union[str, Any] = tokenizer.eos_token_id
__UpperCAmelCase : Tuple = "mbart50"
__UpperCAmelCase : Tuple = "wav2vec2"
__UpperCAmelCase : int = tokenizer.eos_token_id
__UpperCAmelCase : Any = 25_0004
__UpperCAmelCase : int = tokenizer.eos_token_id
__UpperCAmelCase : Union[str, Any] = SpeechEncoderDecoderConfig.from_dict(lowerCamelCase__ )
hf_wavavec.save_pretrained(lowerCamelCase__ )
feature_extractor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
_a : Dict = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config")
_a : str = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 10 | '''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> list[float]:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = coefficient_matrix.shape
__UpperCAmelCase , __UpperCAmelCase : Any = constant_matrix.shape
if rowsa != colsa:
__UpperCAmelCase : str = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(lowerCamelCase__ )
if colsa != 1:
__UpperCAmelCase : Optional[Any] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(lowerCamelCase__ )
if rowsa != rowsa:
__UpperCAmelCase : Optional[int] = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(lowerCamelCase__ )
if len(lowerCamelCase__ ) != rowsa:
__UpperCAmelCase : List[str] = (
"Number of initial values must be equal to number of rows in coefficient "
f"""matrix but received {len(lowerCamelCase__ )} and {rowsa}"""
)
raise ValueError(lowerCamelCase__ )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
__UpperCAmelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = table.shape
strictly_diagonally_dominant(lowerCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase__ ):
__UpperCAmelCase : int = []
for row in range(lowerCamelCase__ ):
__UpperCAmelCase : List[str] = 0
for col in range(lowerCamelCase__ ):
if col == row:
__UpperCAmelCase : int = table[row][col]
elif col == cols - 1:
__UpperCAmelCase : Any = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
__UpperCAmelCase : List[Any] = (temp + val) / denom
new_val.append(lowerCamelCase__ )
__UpperCAmelCase : str = new_val
    return [float(i) for i in new_val]
def _lowercase ( lowerCamelCase__ ) -> bool:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = table.shape
__UpperCAmelCase : str = True
for i in range(0 , lowerCamelCase__ ):
__UpperCAmelCase : Union[str, Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
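# Added worked example (not in the original): for the strictly diagonally
# dominant system 3x + y = 5, x + 2y = 5 with initial values (0, 0), the first
# Jacobi sweep gives x = 5/3 and y = 5/2, and further iterations converge to
# the exact solution x = 1, y = 2.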
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : str = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
f"""{test_file} instead.""" )
__UpperCAmelCase : Optional[Any] = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
__UpperCAmelCase : str = components[:-1] + [test_fn.replace(".py" , "" )]
__UpperCAmelCase : Dict = ".".join(lowerCamelCase__ )
return test_module_path
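# Added usage sketch (not in the original): for a test file such as
# "tests/models/bert/test_modeling_bert.py" the function above returns
# "tests.models.bert.test_modeling_bert", which the importlib call below can
# load as a module.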
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = get_module_path(lowerCamelCase__ )
__UpperCAmelCase : List[Any] = importlib.import_module(lowerCamelCase__ )
return test_module
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Any = get_test_module(lowerCamelCase__ )
for attr in dir(lowerCamelCase__ ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# sort with class names
    return sorted(lowerCamelCase__ , key=lambda x: x.__name__ )
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : int = []
__UpperCAmelCase : Any = get_test_module(lowerCamelCase__ )
for attr in dir(lowerCamelCase__ ):
__UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
__UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "all_model_classes" , [] )
if len(lowerCamelCase__ ) > 0:
test_classes.append(lowerCamelCase__ )
# sort with class names
    return sorted(lowerCamelCase__ , key=lambda x: x.__name__ )
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Tuple = get_test_classes(lowerCamelCase__ )
__UpperCAmelCase : Optional[int] = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(lowerCamelCase__ , key=lambda x: x.__name__ )
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
__UpperCAmelCase : Any = test_class()
if hasattr(lowerCamelCase__ , "setUp" ):
test.setUp()
__UpperCAmelCase : str = None
if hasattr(lowerCamelCase__ , "model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
__UpperCAmelCase : Optional[int] = test.model_tester.__class__
return model_tester
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : str = get_test_classes(lowerCamelCase__ )
__UpperCAmelCase : List[Any] = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowerCamelCase__ )
# sort with class names
    return sorted(lowerCamelCase__ , key=lambda x: x.__name__ )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = get_test_classes_for_model(lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase : Union[str, Any] = []
for test_class in test_classes:
__UpperCAmelCase : Dict = get_model_tester_from_test_class(lowerCamelCase__ )
if tester_class is not None:
tester_classes.append(lowerCamelCase__ )
# sort with class names
    return sorted(lowerCamelCase__ , key=lambda x: x.__name__ )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase : str = get_test_classes(lowerCamelCase__ )
__UpperCAmelCase : Tuple = {test_class: get_model_tester_from_test_class(lowerCamelCase__ ) for test_class in test_classes}
return test_tester_mapping
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Tuple = get_model_classes(lowerCamelCase__ )
__UpperCAmelCase : Union[str, Any] = {
model_class: get_test_classes_for_model(lowerCamelCase__ , lowerCamelCase__ ) for model_class in model_classes
}
return model_test_mapping
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = get_model_classes(lowerCamelCase__ )
__UpperCAmelCase : str = {
model_class: get_tester_classes_for_model(lowerCamelCase__ , lowerCamelCase__ ) for model_class in model_classes
}
return model_to_tester_mapping
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return o
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return o.__name__
elif isinstance(lowerCamelCase__ , (list, tuple) ):
        return [to_json(x) for x in o]
    elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        return {to_json(k): to_json(v) for k, v in o.items()}
else:
return o
| 10 | '''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
__UpperCAmelCase : Any = prime_factors(lowerCamelCase__ )
if is_square_free(lowerCamelCase__ ):
return -1 if len(lowerCamelCase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
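    # Added worked values (not in the original): mobius(4) = 0 since 4 = 2*2 is
    # not square-free; mobius(6) = 1 since 6 = 2*3 has an even number of prime
    # factors; mobius(30) = -1 since 30 = 2*3*5 has an odd number.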
| 10 | 1 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __A (__magic_name__ ):
snake_case :Union[str, Any] = (UnCLIPScheduler,)
def _snake_case ( self , **UpperCamelCase_ ):
__UpperCAmelCase : Tuple = {
"num_train_timesteps": 10_00,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**UpperCamelCase_ )
return config
def _snake_case ( self ):
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def _snake_case ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase_ )
def _snake_case ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def _snake_case ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=UpperCamelCase_ )
def _snake_case ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def _snake_case ( self ):
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase_ , prev_timestep=UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : Dict = self.scheduler_classes[0]
__UpperCAmelCase : Optional[int] = self.get_scheduler_config(variance_type="fixed_small_log" )
__UpperCAmelCase : int = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_5_4_9_6_2_5 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_9_9_4_9_8_7 ) ) < 1E-5
def _snake_case ( self ):
__UpperCAmelCase : Tuple = self.scheduler_classes[0]
__UpperCAmelCase : str = self.get_scheduler_config(variance_type="learned_range" )
__UpperCAmelCase : Any = scheduler_class(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = 0.5
assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase_ ) - -1_0.1_7_1_2_7_9_0 < 1E-5
assert scheduler._get_variance(4_87 , predicted_variance=UpperCamelCase_ ) - -5.7_9_9_8_0_5_2 < 1E-5
assert scheduler._get_variance(9_99 , predicted_variance=UpperCamelCase_ ) - -0.0_0_1_0_0_1_1 < 1E-5
def _snake_case ( self ):
__UpperCAmelCase : Tuple = self.scheduler_classes[0]
__UpperCAmelCase : Dict = self.get_scheduler_config()
__UpperCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = scheduler.timesteps
__UpperCAmelCase : Any = self.dummy_model()
__UpperCAmelCase : List[Any] = self.dummy_sample_deter
__UpperCAmelCase : Any = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase_ ):
# 1. predict noise residual
__UpperCAmelCase : Dict = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
__UpperCAmelCase : Optional[int] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
__UpperCAmelCase : List[Any] = pred_prev_sample
__UpperCAmelCase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : Tuple = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1E-3
def _snake_case ( self ):
__UpperCAmelCase : Tuple = self.scheduler_classes[0]
__UpperCAmelCase : List[str] = self.get_scheduler_config()
__UpperCAmelCase : str = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(25 )
__UpperCAmelCase : str = scheduler.timesteps
__UpperCAmelCase : Optional[int] = self.dummy_model()
__UpperCAmelCase : Dict = self.dummy_sample_deter
__UpperCAmelCase : List[str] = torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase_ ):
# 1. predict noise residual
__UpperCAmelCase : Tuple = model(UpperCamelCase_ , UpperCamelCase_ )
if i + 1 == timesteps.shape[0]:
__UpperCAmelCase : Optional[int] = None
else:
__UpperCAmelCase : Dict = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__UpperCAmelCase : Tuple = scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , prev_timestep=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
__UpperCAmelCase : Optional[int] = pred_prev_sample
__UpperCAmelCase : str = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1E-3
def _snake_case ( self ):
pass
def _snake_case ( self ):
pass
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
_a : Optional[Any] = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
_a : Tuple = {
"ctrl": 256,
}
_a : Dict = {
"Pregnancy": 168629,
"Christianity": 7675,
"Explain": 106423,
"Fitness": 63440,
"Saving": 63163,
"Ask": 27171,
"Ass": 95985,
"Joke": 163509,
"Questions": 45622,
"Thoughts": 49605,
"Retail": 52342,
"Feminism": 164338,
"Writing": 11992,
"Atheism": 192263,
"Netflix": 48616,
"Computing": 39639,
"Opinion": 43213,
"Alone": 44967,
"Funny": 58917,
"Gaming": 40358,
"Human": 4088,
"India": 1331,
"Joker": 77138,
"Diet": 36206,
"Legal": 11859,
"Norman": 4939,
"Tip": 72689,
"Weight": 52343,
"Movies": 46273,
"Running": 23425,
"Science": 2090,
"Horror": 37793,
"Confession": 60572,
"Finance": 12250,
"Politics": 16360,
"Scary": 191985,
"Support": 12654,
"Technologies": 32516,
"Teenage": 66160,
"Event": 32769,
"Learned": 67460,
"Notion": 182770,
"Wikipedia": 37583,
"Books": 6665,
"Extract": 76050,
"Confessions": 102701,
"Conspiracy": 75932,
"Links": 63674,
"Narcissus": 150425,
"Relationship": 54766,
"Relationships": 134796,
"Reviews": 41671,
"News": 4256,
"Translation": 26820,
"multilingual": 128406,
}
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = set()
__UpperCAmelCase : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Optional[int] = char
__UpperCAmelCase : List[Any] = set(lowerCamelCase__ )
return pairs
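# Added worked example (not in the original): for the symbol sequence
# ("l", "o", "w") the function above is intended to return
# {("l", "o"), ("o", "w")} -- the adjacent-symbol bigrams that the BPE loop
# below ranks against self.bpe_ranks.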
class __A (__magic_name__ ):
snake_case :List[Any] = VOCAB_FILES_NAMES
snake_case :Optional[int] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Tuple = CONTROL_CODES
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="<unk>" , **UpperCamelCase_ ):
super().__init__(unk_token=UpperCamelCase_ , **UpperCamelCase_ )
with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : Union[str, Any] = json.load(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : List[Any] = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in merges]
__UpperCAmelCase : str = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : List[Any] = {}
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[Any] = tuple(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
__UpperCAmelCase : Tuple = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__UpperCAmelCase : int = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = bigram
__UpperCAmelCase : int = []
__UpperCAmelCase : Optional[Any] = 0
while i < len(UpperCamelCase_ ):
try:
__UpperCAmelCase : Any = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : Optional[Any] = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCamelCase_ )
__UpperCAmelCase : int = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__UpperCAmelCase : str = get_pairs(UpperCamelCase_ )
__UpperCAmelCase : Any = "@@ ".join(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = word[:-4]
__UpperCAmelCase : Optional[Any] = word
return word
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = []
__UpperCAmelCase : str = re.findall(r"\S+\n?" , UpperCamelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(" " ) ) )
return split_tokens
def _snake_case ( self , UpperCamelCase_ ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , UpperCamelCase_ ):
return self.decoder.get(UpperCamelCase_ , self.unk_token )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[Any] = " ".join(UpperCamelCase_ ).replace("@@ " , "" ).strip()
return out_string
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : int = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : List[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" )
__UpperCAmelCase : List[str] = 0
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : List[str] = token_index
writer.write(" ".join(UpperCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 10 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Any = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "ibert"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : str = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : Any = position_embedding_type
__UpperCAmelCase : Tuple = quant_mode
__UpperCAmelCase : Union[str, Any] = force_dequant
class __A (__magic_name__ ):
@property
def _snake_case ( self ):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
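# Added note (not in the original): the OrderedDict above declares which ONNX
# input axes are dynamic -- batch (axis 0) and sequence (axis 1), with an extra
# "choice" axis for the multiple-choice task.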
| 10 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Optional[int] = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : str = HfArgumentParser(lowerCamelCase__ )
__UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()[0]
__UpperCAmelCase : Any = TensorFlowBenchmark(args=lowerCamelCase__ )
try:
__UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__UpperCAmelCase : str = "Arg --no_{0} is no longer used, please use --no-{0} instead."
__UpperCAmelCase : Tuple = " ".join(str(lowerCamelCase__ ).split(" " )[:-1] )
__UpperCAmelCase : Any = ""
__UpperCAmelCase : List[Any] = eval(str(lowerCamelCase__ ).split(" " )[-1] )
__UpperCAmelCase : Optional[int] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
__UpperCAmelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(lowerCamelCase__ )
raise ValueError(lowerCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
| 10 | 1 |
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Optional[Any] = logging.getLogger(__name__)
_a : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_a : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A :
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__magic_name__ )} , )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __A :
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "The input training data file (a text file)."} )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
snake_case :bool = field(
default=__magic_name__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
snake_case :bool = field(
default=__magic_name__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
snake_case :bool = field(default=__magic_name__ , metadata={"help": "Whether ot not to use whole word mask."} )
snake_case :float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
snake_case :float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
snake_case :int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
snake_case :int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
snake_case :bool = field(
default=__magic_name__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False , lowerCamelCase__ = None , ) -> Tuple:
"""simple docstring"""
def _dataset(lowerCamelCase__ , lowerCamelCase__=None ):
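        # Build a line-by-line dataset (optionally with reference files for whole word masking)
        # or a contiguous-block TextDataset, depending on the flags.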
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase__ , file_path=lowerCamelCase__ , block_size=args.block_size , ref_path=lowerCamelCase__ , )
return LineByLineTextDataset(tokenizer=lowerCamelCase__ , file_path=lowerCamelCase__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase__ , file_path=lowerCamelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__UpperCAmelCase : str = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__UpperCAmelCase : int = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__UpperCAmelCase : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
__UpperCAmelCase : Any = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
__UpperCAmelCase : str = AutoModelWithLMHead.from_config(lowerCamelCase__ )
model.resize_token_embeddings(len(lowerCamelCase__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        __UpperCAmelCase : Any = tokenizer.max_len
else:
__UpperCAmelCase : Any = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__UpperCAmelCase : int = (
get_dataset(lowerCamelCase__ , tokenizer=lowerCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__UpperCAmelCase : List[Any] = (
get_dataset(lowerCamelCase__ , tokenizer=lowerCamelCase__ , evaluate=lowerCamelCase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__UpperCAmelCase : Tuple = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__UpperCAmelCase : int = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase__ , mlm_probability=data_args.mlm_probability )
else:
__UpperCAmelCase : Tuple = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__UpperCAmelCase : List[str] = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , data_collator=lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , prediction_loss_only=lowerCamelCase__ , )
# Training
if training_args.do_train:
__UpperCAmelCase : Dict = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase : Optional[Any] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCAmelCase : Dict = trainer.evaluate()
__UpperCAmelCase : Tuple = math.exp(eval_output["eval_loss"] )
__UpperCAmelCase : Optional[int] = {"perplexity": perplexity}
__UpperCAmelCase : int = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(lowerCamelCase__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , lowerCamelCase__ , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(lowerCamelCase__ )
return results
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 10 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = [tensor.shape for tensor in tensor_list]
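    # True only when every tensor shares the shape of the first one.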
return all(shape == shapes[0] for shape in shapes[1:] )
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline
snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case :Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case :Any = frozenset([] )
snake_case :Optional[int] = True
@property
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[str] = (16, 16)
__UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
__UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" )
__UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , )
__UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ )
__UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith("mps" ):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__UpperCAmelCase : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
__UpperCAmelCase : List[str] = "cpu"
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
__UpperCAmelCase : Tuple = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
__UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def _snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _snake_case ( self ):
__UpperCAmelCase : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : List[str] = 2
__UpperCAmelCase : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # these schedulers are not supported by this pipeline, so skip them
continue
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , scheduler_enum.name )
__UpperCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
__UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ )[0]
outputs.append(UpperCamelCase_ )
assert check_same_shape(UpperCamelCase_ )
@require_torch_gpu
@slow
class __A (unittest.TestCase ):
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = torch.manual_seed(33 )
__UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
__UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images
__UpperCAmelCase : int = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = torch.manual_seed(33 )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
__UpperCAmelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
__UpperCAmelCase : Dict = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 10 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __A (__magic_name__ ):
snake_case :torch.FloatTensor
class __A (__magic_name__ , __magic_name__ ):
@register_to_config
def __init__( self , UpperCamelCase_ = 32 , UpperCamelCase_ = 64 , UpperCamelCase_ = 20 , UpperCamelCase_ = 7_68 , UpperCamelCase_=77 , UpperCamelCase_=4 , UpperCamelCase_ = 0.0 , UpperCamelCase_ = "silu" , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = "linear" , UpperCamelCase_ = "prd" , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , ):
super().__init__()
__UpperCAmelCase : List[str] = num_attention_heads
__UpperCAmelCase : Dict = attention_head_dim
__UpperCAmelCase : str = num_attention_heads * attention_head_dim
__UpperCAmelCase : Any = additional_embeddings
__UpperCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
__UpperCAmelCase : List[str] = embedding_proj_dim or embedding_dim
__UpperCAmelCase : Any = clip_embed_dim or embedding_dim
__UpperCAmelCase : List[str] = Timesteps(UpperCamelCase_ , UpperCamelCase_ , 0 )
__UpperCAmelCase : Any = TimestepEmbedding(UpperCamelCase_ , UpperCamelCase_ , out_dim=UpperCamelCase_ , act_fn=UpperCamelCase_ )
__UpperCAmelCase : Tuple = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
if embedding_proj_norm_type is None:
__UpperCAmelCase : List[str] = None
elif embedding_proj_norm_type == "layer":
__UpperCAmelCase : Dict = nn.LayerNorm(UpperCamelCase_ )
else:
raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
__UpperCAmelCase : List[Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
if encoder_hid_proj_type is None:
__UpperCAmelCase : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
__UpperCAmelCase : Dict = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
else:
raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
__UpperCAmelCase : str = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase_ ) )
if added_emb_type == "prd":
__UpperCAmelCase : Optional[Any] = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase_ ) )
elif added_emb_type is None:
__UpperCAmelCase : Any = None
else:
raise ValueError(
f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
__UpperCAmelCase : Optional[Any] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dropout=UpperCamelCase_ , activation_fn="gelu" , attention_bias=UpperCamelCase_ , )
for d in range(UpperCamelCase_ )
] )
if norm_in_type == "layer":
__UpperCAmelCase : Union[str, Any] = nn.LayerNorm(UpperCamelCase_ )
elif norm_in_type is None:
__UpperCAmelCase : Tuple = None
else:
raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" )
__UpperCAmelCase : Dict = nn.LayerNorm(UpperCamelCase_ )
__UpperCAmelCase : List[Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ )
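        # Additive causal mask: -10000.0 above the diagonal blocks attention to future positions.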
__UpperCAmelCase : Dict = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
__UpperCAmelCase : List[Any] = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , UpperCamelCase_ , persistent=UpperCamelCase_ )
__UpperCAmelCase : int = nn.Parameter(torch.zeros(1 , UpperCamelCase_ ) )
__UpperCAmelCase : Tuple = nn.Parameter(torch.zeros(1 , UpperCamelCase_ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = {}
def fn_recursive_add_processors(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if hasattr(UpperCamelCase_ , "set_processor" ):
__UpperCAmelCase : Dict = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase_ , UpperCamelCase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return processors
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Any = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase_ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if hasattr(UpperCamelCase_ , "set_processor" ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
module.set_processor(UpperCamelCase_ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase_ , UpperCamelCase_ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
self.set_attn_processor(AttnProcessor() )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , ):
__UpperCAmelCase : Tuple = hidden_states.shape[0]
__UpperCAmelCase : Optional[Any] = timestep
if not torch.is_tensor(UpperCamelCase_ ):
__UpperCAmelCase : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(UpperCamelCase_ ) and len(timesteps.shape ) == 0:
__UpperCAmelCase : int = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : int = timesteps * torch.ones(UpperCamelCase_ , dtype=timesteps.dtype , device=timesteps.device )
__UpperCAmelCase : Union[str, Any] = self.time_proj(UpperCamelCase_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__UpperCAmelCase : Union[str, Any] = timesteps_projected.to(dtype=self.dtype )
__UpperCAmelCase : Optional[Any] = self.time_embedding(UpperCamelCase_ )
if self.embedding_proj_norm is not None:
__UpperCAmelCase : Optional[int] = self.embedding_proj_norm(UpperCamelCase_ )
__UpperCAmelCase : int = self.embedding_proj(UpperCamelCase_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__UpperCAmelCase : Optional[Any] = self.encoder_hidden_states_proj(UpperCamelCase_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
__UpperCAmelCase : List[str] = self.proj_in(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = self.positional_embedding.to(hidden_states.dtype )
__UpperCAmelCase : Any = []
__UpperCAmelCase : Dict = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCamelCase_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__UpperCAmelCase : List[Any] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__UpperCAmelCase : Dict = hidden_states[:, None, :]
__UpperCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__UpperCAmelCase : Any = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase_ , -1 , -1 )
additional_embeds.append(UpperCamelCase_ )
__UpperCAmelCase : int = torch.cat(
UpperCamelCase_ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__UpperCAmelCase : int = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__UpperCAmelCase : int = F.pad(
UpperCamelCase_ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__UpperCAmelCase : str = hidden_states + positional_embeddings
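        # Convert the padding mask to an additive bias, combine it with the causal mask,
        # and expand it over the attention heads.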
if attention_mask is not None:
__UpperCAmelCase : Dict = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
__UpperCAmelCase : List[str] = F.pad(UpperCamelCase_ , (0, self.additional_embeddings) , value=0.0 )
__UpperCAmelCase : Any = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__UpperCAmelCase : str = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__UpperCAmelCase : Dict = self.norm_in(UpperCamelCase_ )
for block in self.transformer_blocks:
__UpperCAmelCase : Optional[Any] = block(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
__UpperCAmelCase : Any = self.norm_out(UpperCamelCase_ )
if self.prd_embedding is not None:
__UpperCAmelCase : List[Any] = hidden_states[:, -1]
else:
__UpperCAmelCase : Dict = hidden_states[:, additional_embeddings_len:]
__UpperCAmelCase : List[Any] = self.proj_to_clip_embeddings(UpperCamelCase_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 10 | '''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ):
super().__init__(features=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _snake_case ( self , UpperCamelCase_ ):
import torch
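        # Stack a homogeneous list of tensors (same shape and dtype) into a single tensor; otherwise keep the list.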
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and column:
if all(
isinstance(UpperCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , (str, bytes, type(UpperCamelCase_ )) ):
return value
elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__UpperCAmelCase : int = {}
if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__UpperCAmelCase : Optional[int] = {"dtype": torch.intaa}
elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__UpperCAmelCase : str = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
__UpperCAmelCase : str = np.asarray(UpperCamelCase_ )
return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _snake_case ( self , UpperCamelCase_ ):
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCamelCase_ , "__array__" ) and not isinstance(UpperCamelCase_ , torch.Tensor ):
__UpperCAmelCase : Dict = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase_ , np.ndarray ):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ )
return self.recursive_tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] )
__UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ )
__UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ )
__UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ )
for column_name in batch:
__UpperCAmelCase : Tuple = self._consolidate(batch[column_name] )
return batch
| 10 | 1 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_a : Optional[Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __A (__magic_name__ ):
snake_case :bool = field(default=__magic_name__ , metadata={"help": "Whether to use SortishSampler or not."} )
snake_case :bool = field(
default=__magic_name__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
snake_case :Optional[int] = field(
default=__magic_name__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
snake_case :Optional[int] = field(
default=__magic_name__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
snake_case :Optional[Union[str, Path, GenerationConfig]] = field(
default=__magic_name__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _snake_case ( self ):
__UpperCAmelCase : Any = super().to_dict()
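        # Convert any nested GenerationConfig values to plain dicts so the result is serializable.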
for k, v in d.items():
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Tuple = v.to_dict()
return d
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
# Color current vertex
__UpperCAmelCase : List[str] = i
# Validate coloring
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
return True
# Backtrack
__UpperCAmelCase : Any = -1
return False
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> list[int]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
return colored_vertices
return []
| 10 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
if index == number_of_items:
return 0
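    # Option 1: skip the current item. Option 2 (below): take it when it still fits in the remaining capacity.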
__UpperCAmelCase : int = 0
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : Optional[int] = knapsack(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 )
if weights[index] <= max_weight:
__UpperCAmelCase : Union[str, Any] = values[index] + knapsack(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , max_weight - weights[index] , index + 1 )
return max(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number | (1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number ^ (1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Optional[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_a : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | '''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
_a : str = datasets.load_iris()
_a : List[Any] = np.array(data["data"])
_a : Optional[Any] = np.array(data["target"])
_a : Dict = data["target_names"]
_a , _a , _a , _a : Any = train_test_split(X, y)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ) -> int:
"""simple docstring"""
__UpperCAmelCase : List[Any] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
__UpperCAmelCase : int = []
for data_point in data:
__UpperCAmelCase : Optional[Any] = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__UpperCAmelCase : Dict = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 10 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = len(lowerCamelCase__ )
__UpperCAmelCase : List[str] = sum(lowerCamelCase__ )
__UpperCAmelCase : Any = [[False for x in range(s + 1 )] for y in range(n + 1 )]
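    # dp[i][j] is True when some subset of the first i numbers sums exactly to j.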
for i in range(1 , n + 1 ):
__UpperCAmelCase : Union[str, Any] = True
for i in range(1 , s + 1 ):
__UpperCAmelCase : Dict = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__UpperCAmelCase : Tuple = dp[i][j - 1]
if arr[i - 1] <= j:
__UpperCAmelCase : str = dp[i][j] or dp[i - 1][j - arr[i - 1]]
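    # The best split places a subset summing to the largest feasible j <= s/2 on one side;
    # the minimum difference is then s - 2*j.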
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__UpperCAmelCase : Dict = s - 2 * j
break
return diff
| 10 | '''simple docstring'''
class __A :
def __init__( self , UpperCamelCase_ ):
__UpperCAmelCase : Any = set_counts
__UpperCAmelCase : int = max(UpperCamelCase_ )
__UpperCAmelCase : List[str] = len(UpperCamelCase_ )
__UpperCAmelCase : Any = [1] * num_sets
__UpperCAmelCase : Any = list(range(UpperCamelCase_ ) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
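        # Union by rank: attach the lower-rank root under the higher-rank root and merge the set sizes.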
__UpperCAmelCase : Optional[int] = self.get_parent(UpperCamelCase_ )
__UpperCAmelCase : List[Any] = self.get_parent(UpperCamelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : List[Any] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__UpperCAmelCase : Union[str, Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : Dict = src_parent
__UpperCAmelCase : Dict = self.set_counts[src_parent]
__UpperCAmelCase : Dict = max(self.max_set , UpperCamelCase_ )
return True
def _snake_case ( self , UpperCamelCase_ ):
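        # Walk up the parent chain to find the set's representative.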
if self.parents[disj_set] == disj_set:
return disj_set
__UpperCAmelCase : str = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 10 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __A (unittest.TestCase ):
@property
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : Dict = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : int = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.dummy_uncond_unet
__UpperCAmelCase : str = DDIMScheduler()
__UpperCAmelCase : Optional[Any] = self.dummy_vq_model
__UpperCAmelCase : Union[str, Any] = LDMPipeline(unet=UpperCamelCase_ , vqvae=UpperCamelCase_ , scheduler=UpperCamelCase_ )
ldm.to(UpperCamelCase_ )
ldm.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
__UpperCAmelCase : Dict = ldm(generator=UpperCamelCase_ , num_inference_steps=2 , output_type="numpy" ).images
__UpperCAmelCase : List[str] = torch.manual_seed(0 )
__UpperCAmelCase : Any = ldm(generator=UpperCamelCase_ , num_inference_steps=2 , output_type="numpy" , return_dict=UpperCamelCase_ )[0]
__UpperCAmelCase : int = image[0, -3:, -3:, -1]
__UpperCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase : Optional[int] = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
__UpperCAmelCase : str = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __A (unittest.TestCase ):
def _snake_case ( self ):
__UpperCAmelCase : Dict = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(UpperCamelCase_ )
ldm.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : List[Any] = torch.manual_seed(0 )
__UpperCAmelCase : Dict = ldm(generator=UpperCamelCase_ , num_inference_steps=5 , output_type="numpy" ).images
__UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
__UpperCAmelCase : List[str] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
__UpperCAmelCase : List[Any] = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Dict = (boundary[1] - boundary[0]) / steps
__UpperCAmelCase : Tuple = boundary[0]
__UpperCAmelCase : List[str] = boundary[1]
__UpperCAmelCase : List[Any] = make_points(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase : int = 0.0
y += (h / 2.0) * f(lowerCamelCase__ )
for i in x_i:
        # interior sample points receive full weight h
y += h * f(lowerCamelCase__ )
y += (h / 2.0) * f(lowerCamelCase__ )
return y
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = a + h
while x < (b - h):
yield x
__UpperCAmelCase : List[str] = x + h
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]: # enter your function here
"""simple docstring"""
__UpperCAmelCase : str = (x - 0) * (x - 0)
return y
def _lowercase ( ) -> int:
"""simple docstring"""
__UpperCAmelCase : Tuple = 0.0 # Lower bound of integration
__UpperCAmelCase : Union[str, Any] = 1.0 # Upper bound of integration
__UpperCAmelCase : Union[str, Any] = 10.0 # define number of steps or resolution
__UpperCAmelCase : Dict = [a, b] # define boundary of integration
__UpperCAmelCase : Optional[int] = method_a(lowerCamelCase__ , lowerCamelCase__ )
print(f"""y = {y}""" )
if __name__ == "__main__":
    _lowercase()
| 10 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_a : Optional[Any] = logging.get_logger("transformers.models.speecht5")
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
hf_model.apply_weight_norm()
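    # Weight norm must be active so the weight_g/weight_v components exist to receive the checkpoint values.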
__UpperCAmelCase : int = checkpoint["input_conv.weight_g"]
__UpperCAmelCase : Tuple = checkpoint["input_conv.weight_v"]
__UpperCAmelCase : str = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
__UpperCAmelCase : Optional[Any] = checkpoint[f"""upsamples.{i}.1.weight_g"""]
__UpperCAmelCase : List[str] = checkpoint[f"""upsamples.{i}.1.weight_v"""]
__UpperCAmelCase : int = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
__UpperCAmelCase : Optional[Any] = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
__UpperCAmelCase : Optional[Any] = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
__UpperCAmelCase : Optional[Any] = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
__UpperCAmelCase : Union[str, Any] = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
__UpperCAmelCase : Any = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
__UpperCAmelCase : Tuple = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
__UpperCAmelCase : List[Any] = checkpoint["output_conv.1.weight_g"]
__UpperCAmelCase : Union[str, Any] = checkpoint["output_conv.1.weight_v"]
__UpperCAmelCase : int = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ) -> Union[str, Any]:
"""simple docstring"""
if config_path is not None:
__UpperCAmelCase : Dict = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase__ )
else:
__UpperCAmelCase : Optional[int] = SpeechTaHifiGanConfig()
__UpperCAmelCase : Optional[int] = SpeechTaHifiGan(lowerCamelCase__ )
__UpperCAmelCase : int = torch.load(lowerCamelCase__ )
load_weights(orig_checkpoint["model"]["generator"] , lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase : str = np.load(lowerCamelCase__ )
__UpperCAmelCase : Optional[Any] = stats[0].reshape(-1 )
__UpperCAmelCase : Union[str, Any] = stats[1].reshape(-1 )
__UpperCAmelCase : Optional[int] = torch.from_numpy(lowerCamelCase__ ).float()
__UpperCAmelCase : int = torch.from_numpy(lowerCamelCase__ ).float()
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
_a : List[Any] = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_a : str = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = ["ViTFeatureExtractor"]
_a : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
'''simple docstring'''
from collections.abc import Callable
class __A :
def __init__( self , UpperCamelCase_ = None ):
# Stores actual heap items.
__UpperCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
__UpperCAmelCase : dict = {}
# Stores current size of heap.
__UpperCAmelCase : Tuple = 0
        # Stores the function used to score an item; the heap ordering is based
        # on this score.
        __UpperCAmelCase : Optional[Any] = key or (lambda UpperCamelCase_ : UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
return int((i - 1) / 2 ) if i > 0 else None
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
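        # First update the indexes of the two items in the position map.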
__UpperCAmelCase , __UpperCAmelCase : int = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
__UpperCAmelCase , __UpperCAmelCase : Any = self.arr[j], self.arr[i]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
return self.arr[i][1] < self.arr[j][1]
def _snake_case ( self , UpperCamelCase_ ):
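        # Return the index among i and its children that should be the parent under the heap ordering.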
__UpperCAmelCase : str = self._left(UpperCamelCase_ )
__UpperCAmelCase : str = self._right(UpperCamelCase_ )
__UpperCAmelCase : int = i
if left is not None and not self._cmp(UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = left
if right is not None and not self._cmp(UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = right
return valid_parent
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = self._parent(UpperCamelCase_ )
while parent is not None and not self._cmp(UpperCamelCase_ , UpperCamelCase_ ):
self._swap(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase , __UpperCAmelCase : int = parent, self._parent(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self._get_valid_parent(UpperCamelCase_ )
while valid_parent != index:
self._swap(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase , __UpperCAmelCase : Tuple = valid_parent, self._get_valid_parent(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
if item not in self.pos_map:
return
__UpperCAmelCase : str = self.pos_map[item]
__UpperCAmelCase : List[Any] = [item, self.key(UpperCamelCase_ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(UpperCamelCase_ )
self._heapify_down(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if item not in self.pos_map:
return
__UpperCAmelCase : str = self.pos_map[item]
del self.pos_map[item]
__UpperCAmelCase : Union[str, Any] = self.arr[self.size - 1]
__UpperCAmelCase : int = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(UpperCamelCase_ )
self._heapify_down(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(UpperCamelCase_ )] )
else:
__UpperCAmelCase : int = [item, self.key(UpperCamelCase_ )]
__UpperCAmelCase : Optional[int] = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def _snake_case ( self ):
return self.arr[0] if self.size else None
def _snake_case ( self ):
__UpperCAmelCase : str = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _lowercase ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : str = logging.get_logger(__name__)
_a : Tuple = "▁"
_a : Optional[int] = {"vocab_file": "sentencepiece.bpe.model"}
_a : Tuple = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
_a : Optional[Any] = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = VOCAB_FILES_NAMES
snake_case :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ):
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCAmelCase : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
__UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
__UpperCAmelCase : List[str] = self.__dict__.copy()
__UpperCAmelCase : str = None
__UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Dict = [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , UpperCamelCase_ ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , UpperCamelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Tuple = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip()
return out_string
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : List[str] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
__UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
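# Hedged usage sketch: upstream this class ships as XLMRobertaTokenizer in
# transformers; the lines below assume transformers and sentencepiece are
# installed and download the pretrained vocab on first use.
from transformers import XLMRobertaTokenizer
tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
ids = tok("Hello world!")["input_ids"] # wrapped as <s> ... </s> by the methods above
print(tok.convert_ids_to_tokens(ids))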
| 10 | 1 |
'''simple docstring'''
def _lowercase ( ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase : str = 0
for i in range(1 , 1001 ):
total += i**i
return str(lowerCamelCase__ )[-10:]
if __name__ == "__main__":
print(solution())
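# Hedged aside (not from the original): since only the last ten digits matter,
# three-argument pow keeps every intermediate value small:
# sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10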
| 10 | '''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __A (unittest.TestCase ):
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : Tuple = 2_50
__UpperCAmelCase : str = ids_tensor((batch_size, length) , UpperCamelCase_ )
__UpperCAmelCase : Any = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length
return input_ids, scores
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 )
__UpperCAmelCase : Tuple = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : int = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : int = MaxLengthCriteria(max_length=10 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__UpperCAmelCase , __UpperCAmelCase : List[str] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(5 )
__UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(UpperCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__UpperCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
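# Hedged usage sketch: the criteria exercised above plug into generation via
# the stopping_criteria argument; "gpt2" is an illustrative checkpoint, not
# something this test file uses.
from transformers import AutoModelForCausalLM, AutoTokenizer, MaxLengthCriteria, StoppingCriteriaList
tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok("Hello", return_tensors="pt")
out = model.generate(**inputs, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)]))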
| 10 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : Any = int(lowerCamelCase__ )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowerCamelCase__ )
__UpperCAmelCase , __UpperCAmelCase : List[str] = divmod(lowerCamelCase__ , 2 )
return binary_recursive(lowerCamelCase__ ) + str(lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : str = str(lowerCamelCase__ ).strip()
if not number:
raise ValueError("No input value was provided" )
__UpperCAmelCase : List[str] = "-" if number.startswith("-" ) else ""
__UpperCAmelCase : List[Any] = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f"""{negative}0b{binary_recursive(int(lowerCamelCase__ ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
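# Hedged sanity note: binary_recursive (the name referenced inside the wrapper
# above) returns bin()'s digits without the prefix, so for any non-negative n,
# "0b" + binary_recursive(n) == bin(n); the wrapper also re-attaches a leading
# "-" for negative inputs.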
| 10 | '''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_a : Tuple = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
_a : Dict = {
"Salesforce/codegen-350M-mono": 2048,
}
class __A (__magic_name__ ):
snake_case :Optional[Any] = VOCAB_FILES_NAMES
snake_case :str = PRETRAINED_VOCAB_FILES_MAP
snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Tuple = ["input_ids", "attention_mask"]
snake_case :Dict = CodeGenTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop("add_bos_token" , UpperCamelCase_ ):
__UpperCAmelCase : int = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space:
__UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) )
__UpperCAmelCase : Optional[int] = add_prefix_space
__UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = add_prefix_space
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : str = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
__UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
__UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : List[Any] = completion[: prints[1].start()]
__UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()]
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Dict = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
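# Hedged usage sketch for the truncation hook above; the pattern list echoes
# the one suggested in the CodeGen model card.
from transformers import AutoTokenizer
tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tok("def hello_world():").input_ids
text = tok.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])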
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
_a : int = []
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
for i in range(len(lowerCamelCase__ ) ):
if board[row][i] == 1:
return False
for i in range(len(lowerCamelCase__ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(lowerCamelCase__ , -1 , -1 ) , range(lowerCamelCase__ , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(lowerCamelCase__ , -1 , -1 ) , range(lowerCamelCase__ , len(lowerCamelCase__ ) ) ):
if board[i][j] == 1:
return False
return True
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
if row >= len(lowerCamelCase__ ):
solution.append(lowerCamelCase__ )
printboard(lowerCamelCase__ )
print()
return True
for i in range(len(lowerCamelCase__ ) ):
if is_safe(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
__UpperCAmelCase : Tuple = 1
solve(lowerCamelCase__ , row + 1 )
__UpperCAmelCase : Tuple = 0
return False
def _lowercase ( lowerCamelCase__ ) -> None:
"""simple docstring"""
for i in range(len(lowerCamelCase__ ) ):
for j in range(len(lowerCamelCase__ ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
_a : Any = 8
_a : Dict = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 10 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
_a : Tuple = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
_a : List[Any] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : Optional[Any] = bs[:]
__UpperCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Dict = [chr(lowerCamelCase__ ) for n in cs]
return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : Dict = set()
__UpperCAmelCase : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Optional[Any] = char
return pairs
class __A (__magic_name__ ):
snake_case :Optional[int] = VOCAB_FILES_NAMES
snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
__UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : int = json.load(UpperCamelCase_ )
__UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Any = errors # how to handle errors in decoding
__UpperCAmelCase : str = bytes_to_unicode()
__UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[str] = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = 0
while i < len(UpperCamelCase_ ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : str = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__UpperCAmelCase : int = get_pairs(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ )
__UpperCAmelCase : Dict = word
return word
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = []
for token in re.findall(self.pat , UpperCamelCase_ ):
__UpperCAmelCase : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) )
return bpe_tokens
def _snake_case ( self , UpperCamelCase_ ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , UpperCamelCase_ ):
return self.decoder.get(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = "".join(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : Any = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" )
__UpperCAmelCase : str = 0
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : UpperCamelCase_[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : str = token_index
writer.write(" ".join(UpperCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ):
__UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Tuple = " " + text
return (text, kwargs)
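# Hedged usage sketch: upstream this class ships as BartTokenizer; the lines
# below assume transformers is installed and download the vocab on first use.
from transformers import BartTokenizer
tok = BartTokenizer.from_pretrained("facebook/bart-base")
enc = tok("Hello world!") # special tokens added by the methods above
print(tok.convert_ids_to_tokens(enc["input_ids"]))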
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = None ) -> list[list[str]]:
"""simple docstring"""
__UpperCAmelCase : str = word_bank or []
# create a table
__UpperCAmelCase : int = len(lowerCamelCase__ ) + 1
__UpperCAmelCase : list[list[list[str]]] = []
for _ in range(lowerCamelCase__ ):
table.append([] )
# seed value
__UpperCAmelCase : Optional[Any] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCamelCase__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCamelCase__ )] == word:
__UpperCAmelCase : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(lowerCamelCase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCamelCase__ )]:
combination.reverse()
return table[len(lowerCamelCase__ )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 10 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : int = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __A (__magic_name__ ):
snake_case :Optional[int] = "speech_to_text_2"
snake_case :List[Any] = ["past_key_values"]
snake_case :str = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCamelCase_=1_00_00 , UpperCamelCase_=6 , UpperCamelCase_=20_48 , UpperCamelCase_=4 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=10_24 , **UpperCamelCase_ , ):
__UpperCAmelCase : Any = vocab_size
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Tuple = decoder_ffn_dim
__UpperCAmelCase : List[str] = decoder_layers
__UpperCAmelCase : str = decoder_attention_heads
__UpperCAmelCase : Dict = dropout
__UpperCAmelCase : Optional[Any] = attention_dropout
__UpperCAmelCase : int = activation_dropout
__UpperCAmelCase : Dict = activation_function
__UpperCAmelCase : Tuple = init_std
__UpperCAmelCase : Any = decoder_layerdrop
__UpperCAmelCase : str = use_cache
__UpperCAmelCase : int = decoder_layers
__UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCAmelCase : Union[str, Any] = max_target_positions
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
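# Hedged usage sketch: this config normally drives the decoder-only
# Speech2Text2ForCausalLM; the values below are illustrative.
from transformers import Speech2Text2Config, Speech2Text2ForCausalLM
config = Speech2Text2Config(vocab_size=10000, decoder_layers=6)
model = Speech2Text2ForCausalLM(config) # randomly initialized weights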
| 10 | 1 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
__UpperCAmelCase : Tuple = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
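# Worked example (hedged, not from the original): enumerate starts at i = 0,
# so the first cash flow is undiscounted. With a 13% rate and
# [10, 20.70, -293, 297] this computes
# 10 + 20.70/1.13 - 293/1.13**2 + 297/1.13**3 ≈ 4.69.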
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ = 100 ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = (n * (n + 1) // 2) ** 2
__UpperCAmelCase : Any = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_a : Dict = logging.get_logger(__name__)
# TODO: upload to AWS
_a : Optional[Any] = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class __A (__magic_name__ ):
snake_case :Optional[int] = "retribert"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=8 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=True , UpperCamelCase_=1_28 , UpperCamelCase_=0 , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Optional[Any] = layer_norm_eps
__UpperCAmelCase : Optional[int] = share_encoders
__UpperCAmelCase : Tuple = projection_dim
| 10 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
__UpperCAmelCase : Tuple = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ ) -> list[int]:
"""simple docstring"""
return [ord(elem ) - 96 for elem in lowerCamelCase__]
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
return "".join(chr(elem + 96 ) for elem in encoded )
def _lowercase ( ) -> None:
"""simple docstring"""
__UpperCAmelCase : List[Any] = encode(input("-> " ).strip().lower() )
print("Encoded: " , lowerCamelCase__ )
print("Decoded:" , decode(lowerCamelCase__ ) )
if __name__ == "__main__":
main()
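# Round-trip check (hedged, not from the original) for the a=1..z=26 mapping:
# encode("abc") == [1, 2, 3] and decode(encode("abc")) == "abc".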
| 10 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_a : Union[str, Any] = HfApi()
_a : int = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
_a : Optional[Any] = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_a : List[str] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
_a : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_a : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_a : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_a : str = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a : Any = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_a : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (__magic_name__ ):
snake_case :Any = "cvt"
def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_sizes
__UpperCAmelCase : List[str] = patch_stride
__UpperCAmelCase : Tuple = patch_padding
__UpperCAmelCase : int = embed_dim
__UpperCAmelCase : str = num_heads
__UpperCAmelCase : Any = depth
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : List[str] = attention_drop_rate
__UpperCAmelCase : Dict = drop_rate
__UpperCAmelCase : Dict = drop_path_rate
__UpperCAmelCase : str = qkv_bias
__UpperCAmelCase : Optional[int] = cls_token
__UpperCAmelCase : Optional[Any] = qkv_projection_method
__UpperCAmelCase : Tuple = kernel_qkv
__UpperCAmelCase : Optional[Any] = padding_kv
__UpperCAmelCase : Optional[int] = stride_kv
__UpperCAmelCase : Any = padding_q
__UpperCAmelCase : List[Any] = stride_q
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
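# Hedged usage sketch: in transformers this config pairs with CvtModel /
# CvtForImageClassification; the instantiation below is illustrative.
from transformers import CvtConfig, CvtModel
config = CvtConfig(num_channels=3)
model = CvtModel(config) # randomly initialized weights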
| 10 | 1 |