| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
'''simple docstring'''
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
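# Illustration (hypothetical data, not the contents of the real num.txt): the same
# digit-sum pattern applied to an in-memory list of number strings.
#   str(sum(int(n) for n in ["37107287533", "46376937677"]))[:10]  ->  '8348422521'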
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Add a pointer to a neighboring vertex."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Record the weight of the edge to a destination vertex."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Connect vertices a and b (1-indexed positions in graph) with the given edge weight."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a list-based priority queue; O(V^2) overall."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary-heap priority queue; O((V + E) log V)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
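# A minimal usage sketch (assumed example, not part of the original module):
# a triangle graph; prim() pairs each non-root vertex with its MST parent.
#   x = [Vertex(i) for i in range(3)]
#   connect(x, 1, 2, 1)
#   connect(x, 2, 3, 2)
#   connect(x, 1, 3, 4)
#   prim(x, x[0])  ->  [(2, 1), (3, 2)]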
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ):
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCAmelCase : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
__UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
__UpperCAmelCase : List[str] = self.__dict__.copy()
__UpperCAmelCase : str = None
__UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Dict = [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , UpperCamelCase_ ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , UpperCamelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Tuple = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip()
return out_string
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : List[str] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
__UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
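# Illustration of the fairseq/spm alignment documented in __init__ (ids taken from
# the comment table above): the spm model maps "," to 3, so token-to-id conversion
# returns 3 + self.fairseq_offset == 4; ids 0-3 ("<s>", "<pad>", "</s>", "<unk>")
# are served from self.fairseq_tokens_to_ids instead, and an spm id of 0 falls
# through to self.unk_token_id.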
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_a : List[Any] = "__DUMMY_TRANSFORMERS_USER__"
_a : str = "Dummy User"
_a : str = "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"
_a : Optional[Any] = "https://hub-ci.huggingface.co"
_a : List[str] = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
_a : List[Any] = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
_a : List[str] = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):  # "text_file" fixture name inferred
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):  # data-path fixture name inferred
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):  # data-path fixture name inferred
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1 and return the result."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Clear (set to 0) the bit at `position` of `number` and return the result."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number` and return the result."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
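# Worked examples (added for illustration): 13 is 0b1101.
#   set_bit(13, 1)     ->  15  (0b1111)
#   clear_bit(13, 2)   ->  9   (0b1001)
#   flip_bit(13, 0)    ->  12  (0b1100)
#   is_bit_set(13, 3)  ->  True
#   get_bit(13, 1)     ->  0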
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : str = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
__UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
__UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : List[Any] = completion[: prints[1].start()]
__UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()]
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Dict = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ):
super().__init__(features=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and column:
if all(
isinstance(UpperCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , (str, bytes, type(UpperCamelCase_ )) ):
return value
elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__UpperCAmelCase : int = {}
if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__UpperCAmelCase : Optional[int] = {"dtype": torch.intaa}
elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__UpperCAmelCase : str = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
__UpperCAmelCase : str = np.asarray(UpperCamelCase_ )
return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _snake_case ( self , UpperCamelCase_ ):
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCamelCase_ , "__array__" ) and not isinstance(UpperCamelCase_ , torch.Tensor ):
__UpperCAmelCase : Dict = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase_ , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ )
return self.recursive_tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] )
__UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ )
__UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ )
__UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ )
for column_name in batch:
__UpperCAmelCase : Tuple = self._consolidate(batch[column_name] )
return batch
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    """Returns a mapping from utf-8 bytes to printable unicode strings, avoiding
    whitespace/control characters that BPE merge tables cannot handle."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
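# For illustration (hypothetical input): get_pairs(("l", "o", "w")) returns
# {("l", "o"), ("o", "w")} -- the candidate bigrams that the BPE loop ranks and merges.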
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
__UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : int = json.load(UpperCamelCase_ )
__UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Any = errors # how to handle errors in decoding
__UpperCAmelCase : str = bytes_to_unicode()
__UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[str] = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = 0
while i < len(UpperCamelCase_ ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : str = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__UpperCAmelCase : int = get_pairs(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ )
__UpperCAmelCase : Dict = word
return word
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = []
for token in re.findall(self.pat , UpperCamelCase_ ):
__UpperCAmelCase : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) )
return bpe_tokens
def _snake_case ( self , UpperCamelCase_ ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , UpperCamelCase_ ):
return self.decoder.get(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = "".join(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : Any = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" )
__UpperCAmelCase : str = 0
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : str = token_index
writer.write(" ".join(UpperCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ):
__UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Tuple = " " + text
return (text, kwargs)
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="attention" ) -> Optional[Any]:
"""simple docstring"""
__UpperCAmelCase : Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
__UpperCAmelCase : Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
__UpperCAmelCase : str = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
__UpperCAmelCase : str = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_a = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
        wi_a_ = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
        wi = (wi_a, wi_a_)
    else:
        wi = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]
    wo = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from a T5X-Flax checkpoint to a PyTorch-compatible dict."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
# Encoder.
    for i in range(num_layers):
# Block i, layer 0 (Self Attention).
__UpperCAmelCase : List[str] = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "pre_attention_layer_norm" )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "attention" )
__UpperCAmelCase : Union[str, Any] = layer_norm
__UpperCAmelCase : Any = k.T
__UpperCAmelCase : Union[str, Any] = o.T
__UpperCAmelCase : Any = q.T
__UpperCAmelCase : Any = v.T
# Block i, layer 1 (MLP).
__UpperCAmelCase : str = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "pre_mlp_layer_norm" )
__UpperCAmelCase , __UpperCAmelCase : Tuple = tax_mlp_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , lowerCamelCase__ )
__UpperCAmelCase : Dict = layer_norm
if split_mlp_wi:
__UpperCAmelCase : Any = wi[0].T
__UpperCAmelCase : Optional[int] = wi[1].T
else:
__UpperCAmelCase : List[Any] = wi.T
__UpperCAmelCase : Tuple = wo.T
__UpperCAmelCase : List[Any] = old[
"encoder/relpos_bias/rel_embedding"
].T
__UpperCAmelCase : Any = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
        for i in range(num_layers):
# Block i, layer 0 (Self Attention).
__UpperCAmelCase : Union[str, Any] = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_self_attention_layer_norm" )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "self_attention" )
__UpperCAmelCase : Optional[int] = layer_norm
__UpperCAmelCase : Union[str, Any] = k.T
__UpperCAmelCase : List[Any] = o.T
__UpperCAmelCase : Union[str, Any] = q.T
__UpperCAmelCase : Tuple = v.T
# Block i, layer 1 (Cross Attention).
__UpperCAmelCase : Optional[Any] = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_cross_attention_layer_norm" )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "encoder_decoder_attention" )
__UpperCAmelCase : List[Any] = layer_norm
__UpperCAmelCase : Union[str, Any] = k.T
__UpperCAmelCase : int = o.T
__UpperCAmelCase : List[str] = q.T
__UpperCAmelCase : str = v.T
# Block i, layer 2 (MLP).
__UpperCAmelCase : Any = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_mlp_layer_norm" )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = tax_mlp_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , lowerCamelCase__ )
__UpperCAmelCase : str = layer_norm
if split_mlp_wi:
__UpperCAmelCase : Any = wi[0].T
__UpperCAmelCase : Dict = wi[1].T
else:
__UpperCAmelCase : List[Any] = wi.T
__UpperCAmelCase : int = wo.T
__UpperCAmelCase : List[str] = old["decoder/decoder_norm/scale"]
__UpperCAmelCase : Dict = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__UpperCAmelCase : str = old["decoder/logits_dense/kernel"].T
return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config, builds the model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, t5x_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
_a : Dict = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
_a : int = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
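# Example invocation (script name and paths are hypothetical; the flags match the
# argparse definition above):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output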
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : int = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
'''simple docstring'''
import math


class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights, sample, j, alpha):
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"""Clusters that the test sample belongs to : {winner}""")
    print(f"""Weights that have been trained : {weights}""")


# running the main() function
if __name__ == "__main__":
    main()
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the sum of the cubes of the first n natural
    numbers (equal to the square of their sum) and the sum of their squares."""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Calculate the net present value of a series of cash flows at a given discount rate."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
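# Worked example (hypothetical cash flows): an outlay of -1000 followed by two
# inflows of 600, discounted at 10%:
#   present_value(0.10, [-1000, 600, 600])
#   == round(-1000 + 600 / 1.1 + 600 / 1.21, 2) == 41.32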
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"
def __init__( self , UpperCamelCase_=32 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_=2 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-5 , UpperCamelCase_="group" , UpperCamelCase_="gelu" , UpperCamelCase_=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase_=False , UpperCamelCase_=1_28 , UpperCamelCase_=16 , UpperCamelCase_=True , UpperCamelCase_=0.0_5 , UpperCamelCase_=10 , UpperCamelCase_=2 , UpperCamelCase_=0.0 , UpperCamelCase_=10 , UpperCamelCase_=0 , UpperCamelCase_="mean" , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=2_56 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Tuple = feat_extract_norm
__UpperCAmelCase : Optional[int] = feat_extract_activation
__UpperCAmelCase : Dict = list(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = list(UpperCamelCase_ )
__UpperCAmelCase : str = list(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = conv_bias
__UpperCAmelCase : Any = num_conv_pos_embeddings
__UpperCAmelCase : Optional[int] = num_conv_pos_embedding_groups
__UpperCAmelCase : List[str] = len(self.conv_dim )
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Tuple = squeeze_factor
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : int = hidden_dropout
__UpperCAmelCase : List[str] = attention_dropout
__UpperCAmelCase : List[str] = activation_dropout
__UpperCAmelCase : str = feat_proj_dropout
__UpperCAmelCase : Union[str, Any] = final_dropout
__UpperCAmelCase : Dict = layerdrop
__UpperCAmelCase : Any = layer_norm_eps
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : Any = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[Any] = apply_spec_augment
__UpperCAmelCase : int = mask_time_prob
__UpperCAmelCase : Optional[int] = mask_time_length
__UpperCAmelCase : Tuple = mask_time_min_masks
__UpperCAmelCase : Dict = mask_feature_prob
__UpperCAmelCase : Optional[int] = mask_feature_length
__UpperCAmelCase : List[str] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : List[Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : List[Any] = classifier_proj_size
@property
def _snake_case ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
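    # Note (illustrative, not in the original file): the property above is the
    # total temporal downsampling factor of the convolutional feature encoder;
    # with the default conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) it is
    # 5 * 2**6 = 320 waveform samples per output frame.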
| 10 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_a : Union[str, Any] = HfApi()
_a : int = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
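# Interpretation note (assumed from the de-obfuscated original): each tensor above
# holds the expected first 30 output logits for one checkpoint, stored in `results`
# under the model id with both "/" and "-" replaced by "_" (see the lookup in the
# assert below).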
_a : Optional[Any] = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_a : List[str] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
_a : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_a : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_a : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_a : str = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
"""simple docstring"""
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCamelCase__ , lowerCamelCase__ ) ) )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> list[list[list[float] | float]]:
"""simple docstring"""
if dataset.ndim != value_array.ndim:
__UpperCAmelCase : int = (
"Wrong input data's dimensions... "
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(lowerCamelCase__ )
try:
if dataset.shape[1] != value_array.shape[1]:
__UpperCAmelCase : Optional[int] = (
"Wrong input data's shape... "
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(lowerCamelCase__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
__UpperCAmelCase : List[Any] = (
"Input data have different datatype... "
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(lowerCamelCase__ )
__UpperCAmelCase : str = []
for value in value_array:
__UpperCAmelCase : int = euclidean(lowerCamelCase__ , dataset[0] )
__UpperCAmelCase : str = dataset[0].tolist()
for dataset_value in dataset[1:]:
__UpperCAmelCase : Optional[int] = euclidean(lowerCamelCase__ , lowerCamelCase__ )
if dist > temp_dist:
__UpperCAmelCase : List[str] = temp_dist
__UpperCAmelCase : List[str] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
"""simple docstring"""
return np.dot(lowerCamelCase__ , lowerCamelCase__ ) / (norm(lowerCamelCase__ ) * norm(lowerCamelCase__ ))
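# Illustrative sketch (not in the original file), assuming the de-obfuscated names
# similarity_search(dataset, value_array) and cosine_similarity(a, b):
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0]])
#   values = np.array([[1.0, 1.0]])
#   similarity_search(dataset, values)  # -> [[[1.0, 1.0], 0.0]] (nearest row, distance)
#   cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 0.0]))  # -> 1.0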
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (__magic_name__ ):
snake_case :Any = "cvt"
def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_sizes
__UpperCAmelCase : List[str] = patch_stride
__UpperCAmelCase : Tuple = patch_padding
__UpperCAmelCase : int = embed_dim
__UpperCAmelCase : str = num_heads
__UpperCAmelCase : Any = depth
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : List[str] = attention_drop_rate
__UpperCAmelCase : Dict = drop_rate
__UpperCAmelCase : Dict = drop_path_rate
__UpperCAmelCase : str = qkv_bias
__UpperCAmelCase : Optional[int] = cls_token
__UpperCAmelCase : Optional[Any] = qkv_projection_method
__UpperCAmelCase : Tuple = kernel_qkv
__UpperCAmelCase : Optional[Any] = padding_kv
__UpperCAmelCase : Optional[int] = stride_kv
__UpperCAmelCase : Any = padding_q
__UpperCAmelCase : List[Any] = stride_q
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
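    # Usage sketch (illustrative): CvtConfig() reproduces the 3-stage CvT-13
    # defaults above, e.g. per-stage embed_dim=[64, 192, 384], num_heads=[1, 3, 6]
    # and depth=[1, 2, 10].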
| 10 | 1 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A (unittest.TestCase ):
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = torch.nn.Linear(10 , 10 )
__UpperCAmelCase : Any = torch.optim.SGD(model.parameters() , 0.1 )
__UpperCAmelCase : Optional[int] = Accelerator()
__UpperCAmelCase : List[str] = accelerator.prepare(UpperCamelCase_ )
try:
pickle.loads(pickle.dumps(UpperCamelCase_ ) )
except Exception as e:
self.fail(f"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 10 | '''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> list[float]:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = coefficient_matrix.shape
__UpperCAmelCase , __UpperCAmelCase : Any = constant_matrix.shape
if rowsa != colsa:
__UpperCAmelCase : str = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(lowerCamelCase__ )
    if colsb != 1:
        __UpperCAmelCase : Optional[Any] = f"""Constant matrix must be nx1 but received {rowsb}x{colsb}"""
        raise ValueError(lowerCamelCase__ )
    if rowsa != rowsb:
        __UpperCAmelCase : Optional[int] = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rowsa}x{colsa} and {rowsb}x{colsb}"""
)
raise ValueError(lowerCamelCase__ )
if len(lowerCamelCase__ ) != rowsa:
__UpperCAmelCase : List[str] = (
"Number of initial values must be equal to number of rows in coefficient "
f"""matrix but received {len(lowerCamelCase__ )} and {rowsa}"""
)
raise ValueError(lowerCamelCase__ )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
__UpperCAmelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = table.shape
strictly_diagonally_dominant(lowerCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase__ ):
__UpperCAmelCase : int = []
for row in range(lowerCamelCase__ ):
__UpperCAmelCase : List[str] = 0
for col in range(lowerCamelCase__ ):
if col == row:
__UpperCAmelCase : int = table[row][col]
elif col == cols - 1:
__UpperCAmelCase : Any = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
__UpperCAmelCase : List[Any] = (temp + val) / denom
new_val.append(lowerCamelCase__ )
__UpperCAmelCase : str = new_val
return [float(lowerCamelCase__ ) for i in new_val]
def _lowercase ( lowerCamelCase__ ) -> bool:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = table.shape
__UpperCAmelCase : str = True
for i in range(0 , lowerCamelCase__ ):
__UpperCAmelCase : Union[str, Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
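# Worked sketch (illustrative, not in the original file): one Jacobi sweep computes
# x_i = (b_i - sum_{j != i} a_ij * x_j) / a_ii. For the strictly diagonally dominant
# system below it converges in a single iteration (pure-numpy check):
#   a = np.array([[2.0, 0.0], [0.0, 2.0]])
#   b = np.array([2.0, 4.0])
#   x = np.zeros(2)
#   (b - (a - np.diag(np.diag(a))) @ x) / np.diag(a)  # -> array([1., 2.])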
| 10 | 1 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_a : Tuple = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
_a : Dict = {
"Salesforce/codegen-350M-mono": 2048,
}
class __A (__magic_name__ ):
snake_case :Optional[Any] = VOCAB_FILES_NAMES
snake_case :str = PRETRAINED_VOCAB_FILES_MAP
snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Tuple = ["input_ids", "attention_mask"]
snake_case :Dict = CodeGenTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop("add_bos_token" , UpperCamelCase_ ):
__UpperCAmelCase : int = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space:
__UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) )
__UpperCAmelCase : Optional[int] = add_prefix_space
__UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = add_prefix_space
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : str = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
__UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
__UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : List[Any] = completion[: prints[1].start()]
__UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()]
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Dict = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
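# Behaviour sketch (illustrative, not in the original file): `truncate` cuts the
# decoded completion at the earliest match of any regex in
# `truncate_before_pattern`, and additionally drops everything from a second
# top-level `print` or `def` statement onwards, keeping only the first block.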
| 10 | '''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
__UpperCAmelCase : Any = prime_factors(lowerCamelCase__ )
if is_square_free(lowerCamelCase__ ):
return -1 if len(lowerCamelCase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
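# Illustrative values (assuming the imported helpers behave as their names suggest):
#   mobius(2) == -1  # one prime factor, square-free, odd count
#   mobius(6) == 1   # factors 2 * 3, square-free, even count
#   mobius(4) == 0   # 2 * 2 is not square-free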
| 10 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __A :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=10 , UpperCamelCase_=3 , UpperCamelCase_=32 * 8 , UpperCamelCase_=32 * 8 , UpperCamelCase_=4 , UpperCamelCase_=64 , ):
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : str = is_training
__UpperCAmelCase : Tuple = use_auxiliary_loss
__UpperCAmelCase : Optional[int] = num_queries
__UpperCAmelCase : Tuple = num_channels
__UpperCAmelCase : Optional[Any] = min_size
__UpperCAmelCase : Dict = max_size
__UpperCAmelCase : Any = num_labels
__UpperCAmelCase : List[Any] = hidden_dim
__UpperCAmelCase : str = hidden_dim
def _snake_case ( self ):
__UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCamelCase_ )
__UpperCAmelCase : int = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase_ )
__UpperCAmelCase : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase_ ) > 0.5
).float()
__UpperCAmelCase : Tuple = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase_ ) > 0.5).long()
__UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__UpperCAmelCase : Union[str, Any] = self.num_queries
__UpperCAmelCase : List[str] = self.num_labels
__UpperCAmelCase : int = [1, 1, 1, 1]
__UpperCAmelCase : Tuple = self.num_channels
__UpperCAmelCase : Union[str, Any] = 64
__UpperCAmelCase : List[str] = 1_28
__UpperCAmelCase : Tuple = self.hidden_dim
__UpperCAmelCase : List[Any] = self.hidden_dim
__UpperCAmelCase : Dict = self.hidden_dim
return config
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__UpperCAmelCase : List[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : List[Any] = output.encoder_hidden_states
__UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
__UpperCAmelCase : Optional[int] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase_ ) , config.decoder_layers )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
with torch.no_grad():
__UpperCAmelCase : Any = MaskaFormerModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__UpperCAmelCase : Any = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Tuple = MaskaFormerForUniversalSegmentation(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
def comm_check_on_output(UpperCamelCase_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
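            # e.g. with the defaults above (batch_size=2, num_queries=10,
            # min_size=max_size=32*8=256) this is (2, 10, 64, 64)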
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__UpperCAmelCase : List[str] = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = model(UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
__UpperCAmelCase : Dict = model(
pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __A (__magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :List[Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case :Tuple = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
snake_case :List[str] = False
snake_case :Optional[int] = False
snake_case :Tuple = False
snake_case :Optional[Any] = False
def _snake_case ( self ):
__UpperCAmelCase : int = MaskaFormerModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*UpperCamelCase_ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _snake_case ( self ):
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _snake_case ( self ):
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _snake_case ( self ):
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _snake_case ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _snake_case ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self ):
pass
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Tuple = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
@slow
def _snake_case ( self ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__UpperCAmelCase : Dict = MaskaFormerModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : Any = (self.model_tester.min_size,) * 2
__UpperCAmelCase : str = {
"pixel_values": torch.randn((2, 3, *size) , device=UpperCamelCase_ ),
"mask_labels": torch.randn((2, 10, *size) , device=UpperCamelCase_ ),
"class_labels": torch.zeros(2 , 10 , device=UpperCamelCase_ ).long(),
}
__UpperCAmelCase : int = self.model_tester.get_config()
__UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(UpperCamelCase_ ).to(UpperCamelCase_ )
__UpperCAmelCase : int = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(UpperCamelCase_ ).to(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = model(**UpperCamelCase_ , output_attentions=UpperCamelCase_ )
self.assertTrue(outputs.attentions is not None )
def _snake_case ( self ):
if not self.model_tester.is_training:
return
__UpperCAmelCase : Any = self.all_model_classes[1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
__UpperCAmelCase : Optional[int] = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ).loss
loss.backward()
def _snake_case ( self ):
__UpperCAmelCase : Tuple = self.all_model_classes[1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase : int = True
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Optional[Any] = model_class(UpperCamelCase_ ).to(UpperCamelCase_ )
model.train()
__UpperCAmelCase : Dict = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__UpperCAmelCase : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__UpperCAmelCase : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_a : Tuple = 1e-4
def _lowercase ( ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class __A (unittest.TestCase ):
@cached_property
def _snake_case ( self ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _snake_case ( self ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _snake_case ( self ):
__UpperCAmelCase : int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(UpperCamelCase_ )
__UpperCAmelCase : Tuple = self.default_image_processor
__UpperCAmelCase : List[Any] = prepare_img()
__UpperCAmelCase : Tuple = image_processor(UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(**UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
__UpperCAmelCase : Any = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
__UpperCAmelCase : Dict = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : Dict = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCamelCase_ ).eval()
__UpperCAmelCase : Dict = self.default_image_processor
__UpperCAmelCase : List[str] = prepare_img()
__UpperCAmelCase : Union[str, Any] = image_processor(UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ )
__UpperCAmelCase : Any = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**UpperCamelCase_ )
# masks_queries_logits
__UpperCAmelCase : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__UpperCAmelCase : Dict = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
__UpperCAmelCase : Optional[int] = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
# class_queries_logits
__UpperCAmelCase : Any = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCamelCase_ ).eval()
__UpperCAmelCase : Optional[Any] = self.default_image_processor
__UpperCAmelCase : Optional[int] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
__UpperCAmelCase : Any = inputs["pixel_values"].to(UpperCamelCase_ )
__UpperCAmelCase : int = [el.to(UpperCamelCase_ ) for el in inputs["mask_labels"]]
__UpperCAmelCase : int = [el.to(UpperCamelCase_ ) for el in inputs["class_labels"]]
with torch.no_grad():
__UpperCAmelCase : List[str] = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 1 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : str = HfArgumentParser(lowerCamelCase__ )
__UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()[0]
__UpperCAmelCase : Any = TensorFlowBenchmark(args=lowerCamelCase__ )
try:
__UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__UpperCAmelCase : str = "Arg --no_{0} is no longer used, please use --no-{0} instead."
__UpperCAmelCase : Tuple = " ".join(str(lowerCamelCase__ ).split(" " )[:-1] )
__UpperCAmelCase : Any = ""
__UpperCAmelCase : List[Any] = eval(str(lowerCamelCase__ ).split(" " )[-1] )
__UpperCAmelCase : Optional[int] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
__UpperCAmelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(lowerCamelCase__ )
raise ValueError(lowerCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
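# Example invocation sketch (flag names are assumptions based on
# TensorFlowBenchmarkArguments, not taken from this file):
#   python run_tf_benchmark.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128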
| 10 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Any = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "ibert"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : str = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : Any = position_embedding_type
__UpperCAmelCase : Tuple = quant_mode
__UpperCAmelCase : Union[str, Any] = force_dequant
class __A (__magic_name__ ):
@property
def _snake_case ( self ):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 10 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def _lowercase ( ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : List[Any] = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
__UpperCAmelCase : Tuple = Dataset.from_dict(lowerCamelCase__ )
return dataset
class __A (__magic_name__ ):
def _snake_case ( self ):
__UpperCAmelCase : Any = get_dataset()
__UpperCAmelCase : List[Any] = make_duplicate_clusters(UpperCamelCase_ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _snake_case ( self ):
__UpperCAmelCase : Tuple = get_dataset()
__UpperCAmelCase , __UpperCAmelCase : Tuple = deduplicate_dataset(UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , 2 )
print(UpperCamelCase_ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , UpperCamelCase_ )
| 10 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : str = HfArgumentParser(lowerCamelCase__ )
__UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()[0]
__UpperCAmelCase : Any = TensorFlowBenchmark(args=lowerCamelCase__ )
try:
__UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__UpperCAmelCase : str = "Arg --no_{0} is no longer used, please use --no-{0} instead."
__UpperCAmelCase : Tuple = " ".join(str(lowerCamelCase__ ).split(" " )[:-1] )
__UpperCAmelCase : Any = ""
__UpperCAmelCase : List[Any] = eval(str(lowerCamelCase__ ).split(" " )[-1] )
__UpperCAmelCase : Optional[int] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
__UpperCAmelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(lowerCamelCase__ )
raise ValueError(lowerCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
| 10 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_a : Any = logging.getLogger(__name__)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
return (preds == labels).mean()
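# Illustrative example (not in the original file):
#   simple_accuracy(np.array([0, 1, 1]), np.array([0, 0, 1]))  # -> 0.666...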
@dataclass
class __A :
snake_case :str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __A :
snake_case :str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
snake_case :str = field(metadata={"help": "Should contain the data files for the task."} )
snake_case :int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case :bool = field(
default=__magic_name__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowercase ( ) -> int:
"""simple docstring"""
__UpperCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCamelCase__ )
# Set seed
set_seed(training_args.seed )
try:
__UpperCAmelCase : Tuple = processors[data_args.task_name]()
__UpperCAmelCase : str = processor.get_labels()
__UpperCAmelCase : str = len(lowerCamelCase__ )
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCAmelCase : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCAmelCase : int = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCAmelCase : List[str] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(lowerCamelCase__ ) -> Dict:
__UpperCAmelCase : int = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase__ , p.label_ids )}
# Data collator
__UpperCAmelCase : Optional[Any] = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCAmelCase : Dict = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , data_collator=lowerCamelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase : Optional[int] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCAmelCase : int = trainer.evaluate()
__UpperCAmelCase : List[str] = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_master():
with open(lowerCamelCase__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , lowerCamelCase__ , lowerCamelCase__ )
writer.write("%s = %s\n" % (key, value) )
results.update(lowerCamelCase__ )
return results
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 10 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
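# e.g. (illustrative): check_same_shape([torch.zeros(1, 4), torch.zeros(1, 4)]) is
# True, while mixing torch.zeros(1, 4) with torch.zeros(2, 4) yields False.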
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline
snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case :Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case :Any = frozenset([] )
snake_case :Optional[int] = True
@property
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[str] = (16, 16)
__UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
__UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" )
__UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , )
__UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ )
__UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith("mps" ):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__UpperCAmelCase : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
__UpperCAmelCase : List[str] = "cpu"
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
__UpperCAmelCase : Tuple = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
__UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def _snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _snake_case ( self ):
__UpperCAmelCase : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : List[str] = 2
__UpperCAmelCase : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # these schedulers are not supported by this pipeline, so skip them
continue
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , scheduler_enum.name )
__UpperCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
__UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ )[0]
outputs.append(UpperCamelCase_ )
assert check_same_shape(UpperCamelCase_ )
@require_torch_gpu
@slow
class __A (unittest.TestCase ):
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = torch.manual_seed(33 )
__UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
__UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images
__UpperCAmelCase : int = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = torch.manual_seed(33 )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
__UpperCAmelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
__UpperCAmelCase : Dict = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 10 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (__magic_name__ ):
snake_case :Any = "cvt"
def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_sizes
__UpperCAmelCase : List[str] = patch_stride
__UpperCAmelCase : Tuple = patch_padding
__UpperCAmelCase : int = embed_dim
__UpperCAmelCase : str = num_heads
__UpperCAmelCase : Any = depth
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : List[str] = attention_drop_rate
__UpperCAmelCase : Dict = drop_rate
__UpperCAmelCase : Dict = drop_path_rate
__UpperCAmelCase : str = qkv_bias
__UpperCAmelCase : Optional[int] = cls_token
__UpperCAmelCase : Optional[Any] = qkv_projection_method
__UpperCAmelCase : Tuple = kernel_qkv
__UpperCAmelCase : Optional[Any] = padding_kv
__UpperCAmelCase : Optional[int] = stride_kv
__UpperCAmelCase : Any = padding_q
__UpperCAmelCase : List[Any] = stride_q
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
| 10 | '''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ):
super().__init__(features=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and column:
if all(
isinstance(UpperCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , (str, bytes, type(UpperCamelCase_ )) ):
return value
elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__UpperCAmelCase : int = {}
if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__UpperCAmelCase : Optional[int] = {"dtype": torch.intaa}
elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__UpperCAmelCase : str = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
__UpperCAmelCase : str = np.asarray(UpperCamelCase_ )
return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _snake_case ( self , UpperCamelCase_ ):
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCamelCase_ , "__array__" ) and not isinstance(UpperCamelCase_ , torch.Tensor ):
__UpperCAmelCase : Dict = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase_ , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(UpperCamelCase_ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ )
return self.recursive_tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] )
__UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ )
__UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ )
__UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ )
for column_name in batch:
__UpperCAmelCase : Tuple = self._consolidate(batch[column_name] )
return batch
| 10 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_a : int = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """Register the shared transformers command-line options with pytest."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Emit the extra test reports when --make-reports is passed."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 10 | '''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no already-colored neighbour of the current vertex uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to assign a color to vertex `index` and all later vertices."""
    # Base case: every vertex has been assigned a color
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
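# A minimal usage sketch added for illustration; the adjacency matrix below is
# an assumed example, not part of the original snippet.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(demo_graph, 3))  # e.g. [0, 1, 0, 1, 0]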
| 10 | 1 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __A (__magic_name__ , __magic_name__ ):
@register_to_config
def __init__( self , UpperCamelCase_ = 7_68 , ):
super().__init__()
__UpperCAmelCase : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase_ ) )
__UpperCAmelCase : Dict = nn.Parameter(torch.ones(1 , UpperCamelCase_ ) )
def _snake_case ( self , UpperCamelCase_ = None , UpperCamelCase_ = None , ):
__UpperCAmelCase : int = nn.Parameter(self.mean.to(UpperCamelCase_ ).to(UpperCamelCase_ ) )
__UpperCAmelCase : Optional[int] = nn.Parameter(self.std.to(UpperCamelCase_ ).to(UpperCamelCase_ ) )
return self
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[Any] = (embeds - self.mean) * 1.0 / self.std
return embeds
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = (embeds * self.std) + self.mean
return embeds
| 10 | '''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
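# Illustrative sanity checks, added for reference (0b1010 == 10):
assert set_bit(0b1010, 0) == 0b1011
assert clear_bit(0b1010, 1) == 0b1000
assert flip_bit(0b1010, 3) == 0b0010
assert is_bit_set(0b1010, 1) is True
assert get_bit(0b1010, 0) == 0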
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : List[Any] = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class __A (_lowerCamelCase ):
snake_case :Optional[Any] = "timesformer"
def __init__( self , UpperCamelCase_=2_24 , UpperCamelCase_=16 , UpperCamelCase_=3 , UpperCamelCase_=8 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-6 , UpperCamelCase_=True , UpperCamelCase_="divided_space_time" , UpperCamelCase_=0 , **UpperCamelCase_ , ):
super().__init__(**A__ )
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : List[Any] = patch_size
__UpperCAmelCase : List[Any] = num_channels
__UpperCAmelCase : Union[str, Any] = num_frames
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : Any = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : Dict = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Union[str, Any] = layer_norm_eps
__UpperCAmelCase : Tuple = qkv_bias
__UpperCAmelCase : List[str] = attention_type
__UpperCAmelCase : List[Any] = drop_path_rate
| 700 | '''simple docstring'''
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Euclidean distance between two points given as coordinate sequences."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
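# Expected behaviour, for reference: the split is random, but the query point
# [4.4, 3.1, 1.3, 1.4] sits squarely in the "setosa" cluster of the iris data,
# so this should print "setosa".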
| 10 | 0 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters generated by the recurrence below, for all perimeters
    that do not exceed `max_perimeter`."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 701 | '''simple docstring'''
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with the number of items in each set and rank 1 per set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge the sets containing `src` and `dst`; return False if already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of `disj_set`, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
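# A minimal usage sketch (added for illustration): three singleton sets are
# merged pairwise while `max_set` tracks the largest set size seen so far.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1])
    ds.merge(0, 1)
    ds.merge(1, 2)
    print(ds.max_set)  # 3 -- all three elements now share one set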
| 10 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : List[str] = logging.get_logger(__name__)
_a : int = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class __A (snake_case__ , snake_case__ ):
snake_case :Any = """focalnet"""
def __init__( self , UpperCamelCase_=2_24 , UpperCamelCase_=4 , UpperCamelCase_=3 , UpperCamelCase_=96 , UpperCamelCase_=False , UpperCamelCase_=[1_92, 3_84, 7_68, 7_68] , UpperCamelCase_=[2, 2, 6, 2] , UpperCamelCase_=[2, 2, 2, 2] , UpperCamelCase_=[3, 3, 3, 3] , UpperCamelCase_="gelu" , UpperCamelCase_=4.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_=False , UpperCamelCase_=1E-4 , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-5 , UpperCamelCase_=32 , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ):
super().__init__(**UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : Optional[int] = patch_size
__UpperCAmelCase : List[Any] = num_channels
__UpperCAmelCase : List[Any] = embed_dim
__UpperCAmelCase : str = use_conv_embed
__UpperCAmelCase : Optional[Any] = hidden_sizes
__UpperCAmelCase : Optional[Any] = depths
__UpperCAmelCase : List[str] = focal_levels
__UpperCAmelCase : Union[str, Any] = focal_windows
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : Any = hidden_dropout_prob
__UpperCAmelCase : List[str] = drop_path_rate
__UpperCAmelCase : int = use_layerscale
__UpperCAmelCase : Optional[Any] = layerscale_value
__UpperCAmelCase : int = use_post_layernorm
__UpperCAmelCase : Union[str, Any] = use_post_layernorm_in_modulation
__UpperCAmelCase : Tuple = normalize_modulator
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
__UpperCAmelCase : str = encoder_stride
__UpperCAmelCase : Any = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names )
| 702 | '''simple docstring'''
def method_1(boundary, steps):
    """Approximate a definite integral with the extended trapezoidal rule."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ... up to b - h."""
    x = a + h
    # note: the strict '<' means that, depending on floating-point rounding,
    # the final interior point b - h may be skipped
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
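# Expected behaviour, for reference: with f(x) = x**2 on [0, 1] and 10 steps
# this prints approximately y = 0.335, close to the exact integral 1/3.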
| 10 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A (_SCREAMING_SNAKE_CASE , unittest.TestCase ):
snake_case :Optional[Any] = RobertaTokenizer
snake_case :Union[str, Any] = RobertaTokenizerFast
snake_case :Tuple = True
snake_case :Any = {"cls_token": "<s>"}
def _snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCAmelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
__UpperCAmelCase : Optional[Any] = dict(zip(A_ , range(len(A_ ) ) ) )
__UpperCAmelCase : Optional[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__UpperCAmelCase : Any = {"unk_token": "<unk>"}
__UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A_ ) )
def _snake_case ( self , **UpperCamelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A_ )
def _snake_case ( self , **UpperCamelCase_ ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Tuple = "lower newer"
__UpperCAmelCase : Dict = "lower newer"
return input_text, output_text
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCAmelCase : List[str] = "lower newer"
__UpperCAmelCase : List[Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize(A_ ) # , add_prefix_space=True)
self.assertListEqual(A_ , A_ )
__UpperCAmelCase : Optional[int] = tokens + [tokenizer.unk_token]
__UpperCAmelCase : Optional[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=A_ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=A_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _snake_case ( self ):
__UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained("roberta-base" )
__UpperCAmelCase : List[Any] = tokenizer.encode("sequence builders" , add_special_tokens=A_ )
__UpperCAmelCase : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=A_ )
__UpperCAmelCase : Optional[int] = tokenizer.encode(
"sequence builders" , add_special_tokens=A_ , add_prefix_space=A_ )
__UpperCAmelCase : List[str] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=A_ , add_prefix_space=A_ )
__UpperCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : Dict = "Encode this sequence."
__UpperCAmelCase : Any = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
__UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A_ , A_ )
__UpperCAmelCase : int = tokenizer.encode(A_ , add_special_tokens=A_ , add_prefix_space=A_ )
__UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A_ , A_ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
__UpperCAmelCase : str = tokenizer.encode(A_ , add_special_tokens=A_ )
__UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A_ , A_ )
# Testing spaces after special tokens
__UpperCAmelCase : List[Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(A_ , lstrip=A_ , rstrip=A_ )} ) # mask token has a left space
__UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A_ )
__UpperCAmelCase : Union[str, Any] = "Encode <mask> sequence"
__UpperCAmelCase : Union[str, Any] = "Encode <mask>sequence"
__UpperCAmelCase : Optional[int] = tokenizer.encode(A_ )
__UpperCAmelCase : Dict = encoded.index(A_ )
__UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A_ , A_ )
__UpperCAmelCase : Tuple = tokenizer.encode(A_ )
__UpperCAmelCase : Dict = encoded.index(A_ )
__UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A_ , A_ )
def _snake_case ( self ):
pass
def _snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCAmelCase : List[str] = "A, <mask> AllenNLP sentence."
__UpperCAmelCase : Optional[Any] = tokenizer_r.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
__UpperCAmelCase : Dict = tokenizer_p.encode_plus(A_ , add_special_tokens=A_ , return_token_type_ids=A_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
__UpperCAmelCase : int = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
__UpperCAmelCase : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
A_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
A_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def _snake_case ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
__UpperCAmelCase : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCAmelCase : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , A_ )
self.assertEqual(post_processor_state["add_prefix_space"] , A_ )
self.assertEqual(post_processor_state["trim_offsets"] , A_ )
def _snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCAmelCase : List[str] = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCAmelCase : List[str] = f"""{text_of_1_token} {text_of_1_token}"""
__UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
__UpperCAmelCase : str = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ) + 1, len(A_ ) + 1 + len(A_ )) , )
__UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
__UpperCAmelCase : List[str] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ) + 1, len(A_ ) + 1 + len(A_ )) , )
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
__UpperCAmelCase : Tuple = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ), len(A_ ) + 1 + len(A_ )) , )
__UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
__UpperCAmelCase : Optional[Any] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A_ ), len(A_ ) + 1 + len(A_ )) , )
__UpperCAmelCase : Dict = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
__UpperCAmelCase : List[Any] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_ ) + 1, 1 + len(A_ ) + 1 + len(A_ )) , )
__UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
__UpperCAmelCase : Union[str, Any] = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_ ), 1 + len(A_ ) + 1 + len(A_ )) , )
__UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
A_ , use_fast=A_ , add_prefix_space=A_ , trim_offsets=A_ )
__UpperCAmelCase : int = tokenizer_r(A_ , return_offsets_mapping=A_ , add_special_tokens=A_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A_ ), 1 + len(A_ ) + 1 + len(A_ )) , )
| 703 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = ["ViTFeatureExtractor"]
_a : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
'''simple docstring'''
_a : List[Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_a : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_a : Dict = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 704 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : str = logging.get_logger(__name__)
_a : Tuple = "▁"
_a : Optional[int] = {"vocab_file": "sentencepiece.bpe.model"}
_a : Tuple = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
_a : Optional[Any] = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = VOCAB_FILES_NAMES
snake_case :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCAmelCase : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
__UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
__UpperCAmelCase : List[str] = self.__dict__.copy()
__UpperCAmelCase : str = None
__UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Dict = [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , UpperCamelCase_ ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , UpperCamelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Tuple = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip()
return out_string
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : List[str] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
__UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
| 10 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
_a : List[Any] = logging.getLogger()
def get_results(output_dir):
    """Load the `all_results.json` metrics file written by the example script."""
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
_a : Tuple = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __A (lowercase__ ):
def _snake_case ( self ):
import xla_spawn
__UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
__UpperCAmelCase : str = f"""\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n """.split()
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
__UpperCAmelCase : List[str] = time()
xla_spawn.main()
__UpperCAmelCase : int = time()
__UpperCAmelCase : int = get_results(__lowerCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00 )
def _snake_case ( self ):
import xla_spawn
__UpperCAmelCase : List[Any] = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(__lowerCamelCase , "argv" , __lowerCamelCase ):
xla_spawn.main()
| 705 | '''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __A (unittest.TestCase ):
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : Tuple = 2_50
__UpperCAmelCase : str = ids_tensor((batch_size, length) , UpperCamelCase_ )
__UpperCAmelCase : Any = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length
return input_ids, scores
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 )
__UpperCAmelCase : Tuple = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : int = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : int = MaxLengthCriteria(max_length=10 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__UpperCAmelCase , __UpperCAmelCase : List[str] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(5 )
__UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(UpperCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__UpperCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
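# Illustrative usage outside the tests (model name and prompt are assumptions,
# sketched here as comments so the test module is unchanged at import time):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok("Hello", return_tensors="pt")
#   out = model.generate(
#       **inputs, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   )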
| 10 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Tuple = logging.get_logger(__name__)
_a : List[str] = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class __A (__lowerCamelCase ):
snake_case :Optional[int] = "distilbert"
snake_case :str = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=5_12 , UpperCamelCase_=False , UpperCamelCase_=6 , UpperCamelCase_=12 , UpperCamelCase_=7_68 , UpperCamelCase_=4 * 7_68 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0_2 , UpperCamelCase_=0.1 , UpperCamelCase_=0.2 , UpperCamelCase_=0 , **UpperCamelCase_ , ):
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : Dict = sinusoidal_pos_embds
__UpperCAmelCase : List[str] = n_layers
__UpperCAmelCase : Union[str, Any] = n_heads
__UpperCAmelCase : Any = dim
__UpperCAmelCase : Any = hidden_dim
__UpperCAmelCase : Union[str, Any] = dropout
__UpperCAmelCase : Union[str, Any] = attention_dropout
__UpperCAmelCase : Union[str, Any] = activation
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : str = qa_dropout
__UpperCAmelCase : str = seq_classif_dropout
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ )
class __A (__lowerCamelCase ):
@property
def _snake_case ( self ):
if self.task == "multiple-choice":
__UpperCAmelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__UpperCAmelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 706 | '''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_a : Tuple = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
_a : Dict = {
"Salesforce/codegen-350M-mono": 2048,
}
class __A (__magic_name__ ):
snake_case :Optional[Any] = VOCAB_FILES_NAMES
snake_case :str = PRETRAINED_VOCAB_FILES_MAP
snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Tuple = ["input_ids", "attention_mask"]
snake_case :Dict = CodeGenTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop("add_bos_token" , UpperCamelCase_ ):
__UpperCAmelCase : int = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space:
__UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) )
__UpperCAmelCase : Optional[int] = add_prefix_space
__UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = add_prefix_space
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : str = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
__UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
__UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : List[Any] = completion[: prints[1].start()]
__UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()]
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Dict = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
| 10 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a : Tuple = logging.get_logger(__name__)
_a : List[Any] = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class __A (UpperCAmelCase_ ):
snake_case :List[Any] = 'blip_2_vision_model'
def __init__( self , UpperCamelCase_=14_08 , UpperCamelCase_=61_44 , UpperCamelCase_=39 , UpperCamelCase_=16 , UpperCamelCase_=2_24 , UpperCamelCase_=14 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0_0_0_0_1 , UpperCamelCase_=0.0 , UpperCamelCase_=1E-10 , UpperCamelCase_=True , **UpperCamelCase_ , ):
super().__init__(**_lowercase )
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : Any = patch_size
__UpperCAmelCase : Dict = image_size
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : str = attention_dropout
__UpperCAmelCase : Dict = layer_norm_eps
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : Optional[int] = qkv_bias
@classmethod
def _snake_case ( cls , UpperCamelCase_ , **UpperCamelCase_ ):
cls._set_token_in_kwargs(_lowercase )
        __UpperCAmelCase , __UpperCAmelCase : Any = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
__UpperCAmelCase : Union[str, Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_lowercase , **_lowercase )
class __A (UpperCAmelCase_ ):
snake_case :int = 'blip_2_qformer'
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=0 , UpperCamelCase_="absolute" , UpperCamelCase_=2 , UpperCamelCase_=14_08 , **UpperCamelCase_ , ):
super().__init__(pad_token_id=_lowercase , **_lowercase )
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Tuple = num_attention_heads
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : Tuple = layer_norm_eps
__UpperCAmelCase : Optional[int] = position_embedding_type
__UpperCAmelCase : List[str] = cross_attention_frequency
__UpperCAmelCase : Optional[Any] = encoder_hidden_size
@classmethod
def _snake_case ( cls , UpperCamelCase_ , **UpperCamelCase_ ):
cls._set_token_in_kwargs(_lowercase )
        __UpperCAmelCase , __UpperCAmelCase : str = cls.get_config_dict(_lowercase , **_lowercase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
__UpperCAmelCase : Union[str, Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_lowercase , **_lowercase )
class __A (UpperCAmelCase_ ):
snake_case :Optional[Any] = 'blip-2'
snake_case :str = True
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=32 , **UpperCamelCase_ ):
super().__init__(**_lowercase )
if vision_config is None:
__UpperCAmelCase : Tuple = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
__UpperCAmelCase : str = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
__UpperCAmelCase : Tuple = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
__UpperCAmelCase : List[str] = BlipaVisionConfig(**_lowercase )
__UpperCAmelCase : Union[str, Any] = BlipaQFormerConfig(**_lowercase )
__UpperCAmelCase : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__UpperCAmelCase : Any = CONFIG_MAPPING[text_model_type](**_lowercase )
__UpperCAmelCase : int = self.text_config.tie_word_embeddings
__UpperCAmelCase : int = self.text_config.is_encoder_decoder
__UpperCAmelCase : Tuple = num_query_tokens
__UpperCAmelCase : Tuple = self.vision_config.hidden_size
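        # True when the language model is decoder-only, i.e. its model type
        # appears in the causal-LM mapping.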
__UpperCAmelCase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__UpperCAmelCase : Any = 1.0
__UpperCAmelCase : Union[str, Any] = 0.0_2
@classmethod
def _snake_case ( cls , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_lowercase , )
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
__UpperCAmelCase : int = self.vision_config.to_dict()
__UpperCAmelCase : Any = self.qformer_config.to_dict()
__UpperCAmelCase : Optional[int] = self.text_config.to_dict()
__UpperCAmelCase : str = self.__class__.model_type
return output
| 707 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
_a : Tuple = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
_a : List[Any] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : Optional[Any] = bs[:]
__UpperCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Dict = [chr(lowerCamelCase__ ) for n in cs]
return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : Dict = set()
__UpperCAmelCase : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Optional[Any] = char
return pairs
class __A (__magic_name__ ):
snake_case :Optional[int] = VOCAB_FILES_NAMES
snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
__UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
__UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : int = json.load(UpperCamelCase_ )
__UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Any = errors # how to handle errors in decoding
__UpperCAmelCase : str = bytes_to_unicode()
__UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[str] = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = get_pairs(UpperCamelCase_ )
if not pairs:
return token
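        # Greedy BPE: repeatedly merge the symbol pair with the lowest merge
        # rank until no pair in the word has a known merge left.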
while True:
__UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = 0
while i < len(UpperCamelCase_ ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : str = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__UpperCAmelCase : int = get_pairs(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ )
__UpperCAmelCase : Dict = word
return word
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = []
for token in re.findall(self.pat , UpperCamelCase_ ):
__UpperCAmelCase : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) )
return bpe_tokens
def _snake_case ( self , UpperCamelCase_ ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , UpperCamelCase_ ):
return self.decoder.get(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = "".join(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : Any = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" )
__UpperCAmelCase : str = 0
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : str = token_index
writer.write(" ".join(UpperCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Tuple = [self.sep_token_id]
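        # BART pair format: `<s> A </s></s> B </s>`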
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ):
__UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Tuple = " " + text
return (text, kwargs)
| 10 | 0 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
    __UpperCAmelCase , __UpperCAmelCase : Dict = len(__A ), len(grid[0] )
if (
min(__A , __A ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
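    # Explore all four neighbours, then backtrack so the cell can appear in
    # other paths.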
__UpperCAmelCase : Any = 0
count += depth_first_search(__A , row + 1 , __A , __A )
count += depth_first_search(__A , row - 1 , __A , __A )
count += depth_first_search(__A , __A , col + 1 , __A )
count += depth_first_search(__A , __A , col - 1 , __A )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : int = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __A (__magic_name__ ):
snake_case :Optional[int] = "speech_to_text_2"
snake_case :List[Any] = ["past_key_values"]
snake_case :str = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCamelCase_=1_00_00 , UpperCamelCase_=6 , UpperCamelCase_=20_48 , UpperCamelCase_=4 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=10_24 , **UpperCamelCase_ , ):
__UpperCAmelCase : Any = vocab_size
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Tuple = decoder_ffn_dim
__UpperCAmelCase : List[str] = decoder_layers
__UpperCAmelCase : str = decoder_attention_heads
__UpperCAmelCase : Dict = dropout
__UpperCAmelCase : Optional[Any] = attention_dropout
__UpperCAmelCase : int = activation_dropout
__UpperCAmelCase : Dict = activation_function
__UpperCAmelCase : Tuple = init_std
__UpperCAmelCase : Any = decoder_layerdrop
__UpperCAmelCase : str = use_cache
__UpperCAmelCase : int = decoder_layers
__UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCAmelCase : Union[str, Any] = max_target_positions
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
| 10 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_a : Dict = logging.get_logger(__name__)
_a : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
_a : Optional[int] = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
__UpperCAmelCase : Union[str, Any] = "lm_head"
__UpperCAmelCase : Optional[int] = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
__UpperCAmelCase : Union[str, Any] = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
__UpperCAmelCase : str = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__UpperCAmelCase : int = value
elif weight_type == "weight_g":
__UpperCAmelCase : Tuple = value
elif weight_type == "weight_v":
__UpperCAmelCase : Optional[int] = value
elif weight_type == "bias":
__UpperCAmelCase : Optional[Any] = value
else:
__UpperCAmelCase : Dict = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : str = fairseq_model.state_dict()
__UpperCAmelCase : List[str] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
__UpperCAmelCase : Tuple = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
__UpperCAmelCase : Tuple = True
else:
for key, mapped_key in MAPPING.items():
__UpperCAmelCase : str = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__UpperCAmelCase : Union[str, Any] = True
if "*" in mapped_key:
__UpperCAmelCase : Union[str, Any] = name.split(__lowerCAmelCase )[0].split("." )[-2]
__UpperCAmelCase : str = mapped_key.replace("*" , __lowerCAmelCase )
if "weight_g" in name:
__UpperCAmelCase : List[Any] = "weight_g"
elif "weight_v" in name:
__UpperCAmelCase : List[Any] = "weight_v"
elif "bias" in name:
__UpperCAmelCase : str = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCAmelCase : Any = "weight"
else:
__UpperCAmelCase : Any = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase : int = full_name.split("conv_layers." )[-1]
__UpperCAmelCase : Tuple = name.split("." )
__UpperCAmelCase : List[str] = int(items[0] )
__UpperCAmelCase : List[str] = int(items[1] )
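    # type_id 0 -> the conv layer's own weight/bias; type_id 2 -> its layer
    # norm (with group norm, only layer 0 carries one).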
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__UpperCAmelCase : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__UpperCAmelCase : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__UpperCAmelCase : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__UpperCAmelCase : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ) -> str:
"""simple docstring"""
if config_path is not None:
__UpperCAmelCase : Optional[int] = UniSpeechConfig.from_pretrained(__lowerCAmelCase )
else:
__UpperCAmelCase : List[str] = UniSpeechConfig()
if is_finetuned:
if dict_path:
__UpperCAmelCase : List[Any] = Dictionary.load_from_json(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCAmelCase : Optional[Any] = target_dict.pad_index
__UpperCAmelCase : str = target_dict.bos_index
__UpperCAmelCase : Any = target_dict.eos_index
__UpperCAmelCase : List[str] = len(target_dict.symbols )
__UpperCAmelCase : List[str] = os.path.join(__lowerCAmelCase , "vocab.json" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
__UpperCAmelCase : int = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCAmelCase : Tuple = 42
__UpperCAmelCase : int = 43
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase : int = WavaVecaPhonemeCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__lowerCAmelCase , )
__UpperCAmelCase : Dict = True if config.feat_extract_norm == "layer" else False
__UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
__UpperCAmelCase : Dict = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
__UpperCAmelCase : Optional[int] = UniSpeechForCTC(__lowerCAmelCase )
else:
__UpperCAmelCase : Dict = UniSpeechForPreTraining(__lowerCAmelCase )
if is_finetuned:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__UpperCAmelCase : Optional[int] = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_unispeech.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_a : Tuple = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 709 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ = 100 ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = (n * (n + 1) // 2) ** 2
__UpperCAmelCase : Any = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Optional[Any] = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
__UpperCAmelCase : Tuple = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
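# Illustrative example (hypothetical values, not from the source): an initial
# outlay of -100 followed by inflows of 50 and 60, discounted at 5%, has NPV
# -100 + 50 / 1.05 + 60 / 1.05**2, which is approximately 2.04.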
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __A (unittest.TestCase ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=3 , UpperCamelCase_=2_24 , UpperCamelCase_=30 , UpperCamelCase_=4_00 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , ):
        __UpperCAmelCase : Optional[int] = size if size is not None else {"height": 18, "width": 18}
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : Optional[Any] = num_channels
__UpperCAmelCase : Union[str, Any] = image_size
__UpperCAmelCase : Optional[int] = min_resolution
__UpperCAmelCase : Any = max_resolution
__UpperCAmelCase : Tuple = do_resize
__UpperCAmelCase : int = size
__UpperCAmelCase : Union[str, Any] = do_normalize
__UpperCAmelCase : Any = image_mean
__UpperCAmelCase : str = image_std
def _snake_case ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __A (UpperCamelCase_ , unittest.TestCase ):
snake_case :Dict = ViTImageProcessor if is_vision_available() else None
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = EfficientFormerImageProcessorTester(self )
@property
def _snake_case ( self ):
return self.image_proc_tester.prepare_image_processor_dict()
def _snake_case ( self ):
__UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , "image_mean" ) )
self.assertTrue(hasattr(_a , "image_std" ) )
self.assertTrue(hasattr(_a , "do_normalize" ) )
self.assertTrue(hasattr(_a , "do_resize" ) )
self.assertTrue(hasattr(_a , "size" ) )
def _snake_case ( self ):
pass
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
__UpperCAmelCase : int = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[Any] = image_processor(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
__UpperCAmelCase : int = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[Any] = image_processor(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def _snake_case ( self ):
__UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
__UpperCAmelCase : str = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : List[str] = image_processor(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 711 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_a : Union[str, Any] = HfApi()
_a : int = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
_a : Optional[Any] = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_a : List[str] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
_a : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_a : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_a : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_a : str = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
__UpperCAmelCase : str = int(__UpperCAmelCase )
__UpperCAmelCase : Any = int(__UpperCAmelCase )
__UpperCAmelCase : List[str] = []
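    # Build the terms of 1 + 1/2^p + 1/3^p + ... as strings; each denominator
    # k^p is evaluated, and only the first term is rendered as plain "1".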
for temp in range(int(__UpperCAmelCase ) ):
series.append(f"""1 / {pow(temp + 1 , int(__UpperCAmelCase ) )}""" if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : List[str] = int(input("Enter the last number (nth term) of the P-Series"))
_a : Any = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 712 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (__magic_name__ ):
snake_case :Any = "cvt"
def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_sizes
__UpperCAmelCase : List[str] = patch_stride
__UpperCAmelCase : Tuple = patch_padding
__UpperCAmelCase : int = embed_dim
__UpperCAmelCase : str = num_heads
__UpperCAmelCase : Any = depth
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : List[str] = attention_drop_rate
__UpperCAmelCase : Dict = drop_rate
__UpperCAmelCase : Dict = drop_path_rate
__UpperCAmelCase : str = qkv_bias
__UpperCAmelCase : Optional[int] = cls_token
__UpperCAmelCase : Optional[Any] = qkv_projection_method
__UpperCAmelCase : Tuple = kernel_qkv
__UpperCAmelCase : Optional[Any] = padding_kv
__UpperCAmelCase : Optional[int] = stride_kv
__UpperCAmelCase : Any = padding_q
__UpperCAmelCase : List[Any] = stride_q
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
| 10 | 0 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__UpperCAmelCase : int = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(lowerCAmelCase__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 | '''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> list[float]:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = coefficient_matrix.shape
__UpperCAmelCase , __UpperCAmelCase : Any = constant_matrix.shape
if rowsa != colsa:
__UpperCAmelCase : str = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(lowerCamelCase__ )
if colsa != 1:
__UpperCAmelCase : Optional[Any] = f"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(lowerCamelCase__ )
if rowsa != rowsa:
__UpperCAmelCase : Optional[int] = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
f"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(lowerCamelCase__ )
if len(lowerCamelCase__ ) != rowsa:
__UpperCAmelCase : List[str] = (
"Number of initial values must be equal to number of rows in coefficient "
f"""matrix but received {len(lowerCamelCase__ )} and {rowsa}"""
)
raise ValueError(lowerCamelCase__ )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
__UpperCAmelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = table.shape
strictly_diagonally_dominant(lowerCamelCase__ )
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase__ ):
__UpperCAmelCase : int = []
for row in range(lowerCamelCase__ ):
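            # Jacobi update for row i: x_i_new = (b_i - sum_{j != i} a_ij * x_j_old) / a_ii,
            # reading every x_j from the previous iteration only.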
__UpperCAmelCase : List[str] = 0
for col in range(lowerCamelCase__ ):
if col == row:
__UpperCAmelCase : int = table[row][col]
elif col == cols - 1:
__UpperCAmelCase : Any = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
__UpperCAmelCase : List[Any] = (temp + val) / denom
new_val.append(lowerCamelCase__ )
__UpperCAmelCase : str = new_val
return [float(lowerCamelCase__ ) for i in new_val]
def _lowercase ( lowerCamelCase__ ) -> bool:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = table.shape
__UpperCAmelCase : str = True
for i in range(0 , lowerCamelCase__ ):
__UpperCAmelCase : Union[str, Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(__A , __A ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
__UpperCAmelCase : Optional[int] = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(__A )
else:
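        # Sylvester's sequence: a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1,
        # computed here as (a(n-1) - 1) * a(n-1) + 1.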
__UpperCAmelCase : Optional[Any] = sylvester(number - 1 )
__UpperCAmelCase : Tuple = num - 1
__UpperCAmelCase : Optional[Any] = num
return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 714 | '''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
__UpperCAmelCase : Any = prime_factors(lowerCamelCase__ )
if is_square_free(lowerCamelCase__ ):
return -1 if len(lowerCamelCase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(_lowerCamelCase ):
if len(_lowerCamelCase ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_lowerCamelCase ) )
return data_lists
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase : list[list[float]] = []
for dlist, weight in zip(_lowerCamelCase , _lowerCamelCase ):
__UpperCAmelCase : List[str] = min(_lowerCamelCase )
__UpperCAmelCase : Optional[int] = max(_lowerCamelCase )
__UpperCAmelCase : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
__UpperCAmelCase : Any = f"""Invalid weight of {weight:f} provided"""
raise ValueError(_lowerCamelCase )
score_lists.append(_lowerCamelCase )
return score_lists
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_lowerCamelCase ):
__UpperCAmelCase : List[Any] = final_scores[j] + ele
return final_scores
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : str = get_data(_lowerCamelCase )
__UpperCAmelCase : str = calculate_each_score(_lowerCamelCase , _lowerCamelCase )
__UpperCAmelCase : Any = generate_final_scores(_lowerCamelCase )
# append scores to source data
for i, ele in enumerate(_lowerCamelCase ):
source_data[i].append(_lowerCamelCase )
return source_data
| 715 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
'''simple docstring'''
_a : int = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Any = [False] * len(_lowerCamelCase )
__UpperCAmelCase : str = [s]
__UpperCAmelCase : List[str] = True
while queue:
__UpperCAmelCase : Any = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_lowerCamelCase )
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : int = u
return visited[t]
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase : str = [-1] * (len(_lowerCamelCase ))
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : str = [i[:] for i in graph] # Record original cut, copy.
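    # Ford-Fulkerson with BFS (Edmonds-Karp): while an augmenting path exists,
    # push its bottleneck capacity and update the residual graph.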
while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
__UpperCAmelCase : Optional[Any] = float("Inf" )
__UpperCAmelCase : Optional[Any] = sink
while s != source:
            # Find the bottleneck (minimum residual capacity) along the selected path
__UpperCAmelCase : Optional[int] = min(_lowerCamelCase , graph[parent[s]][s] )
__UpperCAmelCase : Optional[Any] = parent[s]
max_flow += path_flow
__UpperCAmelCase : Dict = sink
while v != source:
__UpperCAmelCase : Union[str, Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__UpperCAmelCase : Optional[Any] = parent[v]
for i in range(len(_lowerCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 716 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Any = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "ibert"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : str = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : Any = position_embedding_type
__UpperCAmelCase : Tuple = quant_mode
__UpperCAmelCase : Union[str, Any] = force_dequant
class __A (__magic_name__ ):
@property
def _snake_case ( self ):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
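# Minimal illustration of the dynamic-axes logic above with plain dicts
# (a sketch for demonstration only; reuses the OrderedDict import at the top
# of this file, and the task names mirror the property above).
def _demo_dynamic_axes(task):
    axis = {0: "batch", 1: "choice", 2: "sequence"} if task == "multiple-choice" else {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis)])
# _demo_dynamic_axes("default")["input_ids"]  -> {0: "batch", 1: "sequence"}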
| 10 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=8 ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : str = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
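# Worked example of the rounding above (a self-contained sketch; the mangled
# function name prevents calling it directly here). With the usual movq scale
# factor of 8, a requested size is rounded up to a whole latent cell:
def _demo_latent_size(pixels, scale_factor=8):
    cells = pixels // scale_factor**2
    if pixels % scale_factor**2 != 0:
        cells += 1  # round partial cells up
    return cells * scale_factor
assert _demo_latent_size(768) == 96  # 768 / 64 = 12 cells -> 12 * 8
assert _demo_latent_size(500) == 64  # ceil(500 / 64) = 8 cells -> 8 * 8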
class __A (_UpperCAmelCase ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
super().__init__()
self.register_modules(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , movq=lowerCamelCase_ , )
__UpperCAmelCase : str = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if latents is None:
__UpperCAmelCase : Union[str, Any] = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
__UpperCAmelCase : List[str] = latents.to(lowerCamelCase_ )
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def _snake_case ( self , UpperCamelCase_=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
__UpperCAmelCase : Union[str, Any] = torch.device(f"""cuda:{gpu_id}""" )
__UpperCAmelCase : str = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase_ , lowerCamelCase_ )
def _snake_case ( self , UpperCamelCase_=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
__UpperCAmelCase : Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowerCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase : Dict = cpu_offload_with_hook(lowerCamelCase_ , lowerCamelCase_ , prev_module_hook=lowerCamelCase_ )
# We'll offload the last model manually.
__UpperCAmelCase : str = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _snake_case ( self ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase_ )
def __call__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 5_12 , UpperCamelCase_ = 5_12 , UpperCamelCase_ = 1_00 , UpperCamelCase_ = 4.0 , UpperCamelCase_ = 1 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = "pil" , UpperCamelCase_ = True , ):
__UpperCAmelCase : Any = self._execution_device
__UpperCAmelCase : Optional[int] = guidance_scale > 1.0
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__UpperCAmelCase : Optional[Any] = torch.cat(lowerCamelCase_ , dim=0 )
__UpperCAmelCase : Optional[int] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__UpperCAmelCase : List[str] = torch.cat(lowerCamelCase_ , dim=0 )
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 )
__UpperCAmelCase : str = negative_image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 )
__UpperCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ )
self.scheduler.set_timesteps(lowerCamelCase_ , device=lowerCamelCase_ )
__UpperCAmelCase : str = self.scheduler.timesteps
__UpperCAmelCase : Optional[Any] = self.unet.config.in_channels
__UpperCAmelCase : List[str] = downscale_height_and_width(lowerCamelCase_ , lowerCamelCase_ , self.movq_scale_factor )
        # create the initial latents
__UpperCAmelCase : Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds}
__UpperCAmelCase : Optional[int] = self.unet(
sample=lowerCamelCase_ , timestep=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , added_cond_kwargs=lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
__UpperCAmelCase : List[str] = noise_pred.chunk(2 )
__UpperCAmelCase : Any = variance_pred.chunk(2 )
__UpperCAmelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : int = self.scheduler.step(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ , )[0]
# post-processing
__UpperCAmelCase : int = self.movq.decode(lowerCamelCase_ , force_not_quantize=lowerCamelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
__UpperCAmelCase : Union[str, Any] = image * 0.5 + 0.5
__UpperCAmelCase : Dict = image.clamp(0 , 1 )
__UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__UpperCAmelCase : str = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
| 717 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : str = HfArgumentParser(lowerCamelCase__ )
__UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()[0]
__UpperCAmelCase : Any = TensorFlowBenchmark(args=lowerCamelCase__ )
try:
__UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__UpperCAmelCase : str = "Arg --no_{0} is no longer used, please use --no-{0} instead."
__UpperCAmelCase : Tuple = " ".join(str(lowerCamelCase__ ).split(" " )[:-1] )
__UpperCAmelCase : Any = ""
__UpperCAmelCase : List[Any] = eval(str(lowerCamelCase__ ).split(" " )[-1] )
__UpperCAmelCase : Optional[int] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
__UpperCAmelCase : Union[str, Any] = full_error_msg + begin_error_msg + str(lowerCamelCase__ )
raise ValueError(lowerCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
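# Typical invocation of this script (a sketch; flag names follow the
# TensorFlowBenchmarkArguments dataclass and should be checked against the
# installed transformers version):
#   python run_benchmark_tf.py --models bert-base-uncased \
#       --batch_sizes 8 --sequence_lengths 128 --inference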
| 10 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __A :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=10 , UpperCamelCase_=3 , UpperCamelCase_=2 , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=10 , UpperCamelCase_=0.0_2 , UpperCamelCase_="divided_space_time" , UpperCamelCase_=None , ):
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Any = image_size
__UpperCAmelCase : int = num_channels
__UpperCAmelCase : Dict = patch_size
__UpperCAmelCase : int = num_frames
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : str = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = attention_type
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : Any = scope
__UpperCAmelCase : Any = num_labels
        # in TimeSformer, the sequence length equals num_frames * num_patches_per_frame + 1 (CLS token)
__UpperCAmelCase : List[str] = (image_size // patch_size) ** 2
__UpperCAmelCase : Tuple = (num_frames) * self.num_patches_per_frame + 1
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : int = None
if self.use_labels:
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : int = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ):
__UpperCAmelCase : List[str] = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__UpperCAmelCase : Optional[int] = self.num_labels
return config
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Any = TimesformerModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = TimesformerForVideoClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__UpperCamelCase )
# verify the logits shape
__UpperCAmelCase : Union[str, Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __UpperCamelCase )
def _snake_case ( self ):
__UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[int] = config_and_inputs
__UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __A (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
snake_case :Union[str, Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
snake_case :Tuple = (
{'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
snake_case :Optional[Any] = False
snake_case :int = False
snake_case :List[Any] = False
snake_case :str = False
def _snake_case ( self ):
__UpperCAmelCase : Dict = TimesformerModelTester(self )
__UpperCAmelCase : Union[str, Any] = ConfigTester(
self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
__UpperCAmelCase : str = copy.deepcopy(__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
__UpperCAmelCase : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase )
return inputs_dict
def _snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def _snake_case ( self ):
pass
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(__UpperCamelCase )
__UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Tuple = [*signature.parameters.keys()]
__UpperCAmelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _snake_case ( self ):
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__UpperCamelCase )
@slow
def _snake_case ( self ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[str] = TimesformerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( self ):
if not self.has_attentions:
pass
else:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : int = True
for model_class in self.all_model_classes:
__UpperCAmelCase : Union[str, Any] = self.model_tester.seq_length
__UpperCAmelCase : Union[str, Any] = self.model_tester.num_frames
__UpperCAmelCase : str = True
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = True
__UpperCAmelCase : List[str] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : str = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
__UpperCAmelCase : Optional[Any] = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : str = True
__UpperCAmelCase : str = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Dict = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
__UpperCAmelCase : Tuple = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__UpperCAmelCase : str = len(__UpperCamelCase )
# Check attention is always last and order is fine
__UpperCAmelCase : int = True
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Dict = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(__UpperCamelCase ) )
__UpperCAmelCase : List[str] = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _snake_case ( self ):
def check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : int = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Dict = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
__UpperCAmelCase : Optional[int] = outputs.hidden_states
__UpperCAmelCase : int = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
__UpperCAmelCase : Dict = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__UpperCAmelCase : List[str] = np.load(_A )
return list(_A )
@require_torch
@require_vision
class __A (unittest.TestCase ):
@cached_property
def _snake_case ( self ):
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
__UpperCamelCase )
__UpperCAmelCase : Optional[int] = self.default_image_processor
__UpperCAmelCase : Optional[int] = prepare_video()
__UpperCAmelCase : Dict = image_processor(video[:8] , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**__UpperCamelCase )
# verify the logits
__UpperCAmelCase : Any = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
__UpperCAmelCase : Any = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
| 718 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
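# e.g. (a sketch, using the name under which this helper is asserted below):
#   check_same_shape([torch.zeros(1, 3), torch.ones(1, 3)])   -> True
#   check_same_shape([torch.zeros(1, 3), torch.zeros(2, 3)])  -> False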
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline
snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case :Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case :Any = frozenset([] )
snake_case :Optional[int] = True
@property
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[str] = (16, 16)
__UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
__UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" )
__UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , )
__UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ )
__UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith("mps" ):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__UpperCAmelCase : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
__UpperCAmelCase : List[str] = "cpu"
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
__UpperCAmelCase : Tuple = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
__UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def _snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _snake_case ( self ):
__UpperCAmelCase : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : List[str] = 2
__UpperCAmelCase : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # skip schedulers that are not compatible with this pipeline
continue
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , scheduler_enum.name )
__UpperCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
__UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ )[0]
outputs.append(UpperCamelCase_ )
assert check_same_shape(UpperCamelCase_ )
@require_torch_gpu
@slow
class __A (unittest.TestCase ):
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = torch.manual_seed(33 )
__UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
__UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images
__UpperCAmelCase : int = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = torch.manual_seed(33 )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
__UpperCAmelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
__UpperCAmelCase : Dict = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 10 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_a : int = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
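# Note (sketch): with _LazyModule, importing this package is cheap; the heavy
# torch-backed submodules are only imported on first attribute access, e.g.
#   from transformers.models.swiftformer import SwiftFormerConfig
# pulls in configuration_swiftformer at that point, not at package import.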
| 719 | '''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ):
super().__init__(features=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and column:
if all(
isinstance(UpperCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
import torch
        if isinstance(UpperCamelCase_ , (str, bytes, type(None )) ):
return value
elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__UpperCAmelCase : int = {}
if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__UpperCAmelCase : Optional[int] = {"dtype": torch.intaa}
elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__UpperCAmelCase : str = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
__UpperCAmelCase : str = np.asarray(UpperCamelCase_ )
return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _snake_case ( self , UpperCamelCase_ ):
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCamelCase_ , "__array__" ) and not isinstance(UpperCamelCase_ , torch.Tensor ):
__UpperCAmelCase : Dict = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase_ , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ )
return self.recursive_tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] )
__UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ )
__UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ )
__UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ )
for column_name in batch:
__UpperCAmelCase : Tuple = self._consolidate(batch[column_name] )
return batch
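# In practice this formatter is selected through the public API rather than
# instantiated directly, e.g. (a sketch using the stable datasets API):
#   import datasets
#   ds = datasets.Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
#   ds = ds.with_format("torch")
#   ds[0]["x"].dtype  # -> torch.float32, per the default dtype mapping above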
| 10 | 0 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
return (data["data"], data["target"])
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : Dict = XGBClassifier()
classifier.fit(__UpperCamelCase , __UpperCamelCase )
return classifier
def _lowercase ( ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = load_iris()
__UpperCAmelCase : Optional[Any] = data_handling(__UpperCamelCase )
__UpperCAmelCase : List[str] = train_test_split(
__UpperCamelCase , __UpperCamelCase , test_size=0.25 )
__UpperCAmelCase : List[str] = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
__UpperCAmelCase : List[str] = xgboost(__UpperCamelCase , __UpperCamelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , display_labels=__UpperCamelCase , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
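# After training, the fitted model can also score new rows directly, e.g.
# (a sketch; `xgboost` is the trainer defined above and `predict` is the
# standard XGBClassifier API):
#   clf = xgboost(x_train, y_train)
#   clf.predict(x_test[:3])  # -> array of class indices into `names`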
| 720 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
# Color current vertex
__UpperCAmelCase : List[str] = i
# Validate coloring
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
return True
# Backtrack
__UpperCAmelCase : Any = -1
return False
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> list[int]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
return colored_vertices
return []
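# Example (sketch; the public entry point is usually named `color`, which the
# mangled definitions above obscure): a triangle needs three colors.
#   triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
#   color(triangle, 3)  -> a valid assignment such as [0, 1, 2]
#   color(triangle, 2)  -> [] (no 2-coloring exists)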
| 10 | 0 |
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
_a : Optional[int] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
_a : Tuple = "main"
# Default branch name
_a : int = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
_a : Optional[int] = "aaaaaaa"
# This commit does not exist, so we should 404.
_a : Tuple = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
_a : Optional[Any] = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def _lowercase ( ) -> Any:
"""simple docstring"""
print("Bonjour!" )
yield
print("Au revoir!" )
class __A (unittest.TestCase ):
def _snake_case ( self ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers" ) is not None
class __A (unittest.TestCase ):
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def _snake_case ( self , UpperCamelCase_ ):
with ContextManagers([] ):
print("Transformers are awesome!" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def _snake_case ( self , UpperCamelCase_ ):
with ContextManagers([context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def _snake_case ( self , UpperCamelCase_ ):
with ContextManagers([context_fr(), context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )
@require_torch
def _snake_case ( self ):
self.assertEqual(find_labels(A_ ) , ["labels"] )
self.assertEqual(find_labels(A_ ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(A_ ) , ["start_positions", "end_positions"] )
class __A (_lowercase ):
pass
self.assertEqual(find_labels(A_ ) , ["labels"] )
@require_tf
def _snake_case ( self ):
self.assertEqual(find_labels(A_ ) , ["labels"] )
self.assertEqual(find_labels(A_ ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(A_ ) , ["start_positions", "end_positions"] )
class __A (_lowercase ):
pass
self.assertEqual(find_labels(A_ ) , ["labels"] )
@require_flax
def _snake_case ( self ):
# Flax models don't have labels
self.assertEqual(find_labels(A_ ) , [] )
self.assertEqual(find_labels(A_ ) , [] )
self.assertEqual(find_labels(A_ ) , [] )
class __A (_lowercase ):
pass
self.assertEqual(find_labels(A_ ) , [] )
| 721 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number | (1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return number ^ (1 << position)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
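# Quick illustration of the helpers above (names assumed from the usual
# bit-manipulation module: set_bit, clear_bit, flip_bit, is_bit_set, get_bit;
# the mangled definitions above obscure them):
#   set_bit(0b1101, 1)     -> 0b1111 (15)
#   clear_bit(0b1111, 2)   -> 0b1011 (11)
#   flip_bit(0b1101, 0)    -> 0b1100 (12)
#   is_bit_set(0b1010, 3)  -> True
#   get_bit(0b1010, 1)     -> 1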
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
'''simple docstring'''
import numpy as np
def _lowercase ( lowerCamelCase__ ) -> np.ndarray:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def _lowercase ( lowerCamelCase__ ) -> np.ndarray:
"""simple docstring"""
return vector * sigmoid(__SCREAMING_SNAKE_CASE )
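# Numeric sanity check (sketch): sigmoid(0) = 0.5, so the SiLU/swish of 0 is
# exactly 0, since it computes x * sigmoid(x). With the names mangled above:
#   sigmoid(np.array([0.0]))                   -> array([0.5])
#   x * sigmoid(x) for x = np.array([0.0])     -> array([0.])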
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 | '''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
_a : str = datasets.load_iris()
_a : List[Any] = np.array(data["data"])
_a : Optional[Any] = np.array(data["target"])
_a : Dict = data["target_names"]
_a , _a , _a , _a : Any = train_test_split(X, y)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ) -> int:
"""simple docstring"""
__UpperCAmelCase : List[Any] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
__UpperCAmelCase : int = []
for data_point in data:
__UpperCAmelCase : Optional[Any] = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__UpperCAmelCase : Dict = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
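# Toy example (sketch): with one training point per class, the single
# nearest neighbour decides the label.
#   classifier(np.array([[0.0], [10.0]]), np.array([0, 1]), ["a", "b"],
#              [1.0], 1)  -> "a"  (distance 1.0 to class "a" vs 9.0 to "b")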
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 10 | 0 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_a : Optional[Any] = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
_a : Dict = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
_a : Optional[Any] = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
_a : List[str] = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
_a : Optional[int] = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
_a : int = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
_a : Optional[int] = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def _lowercase ( ) -> str:
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = randrange(len(lowerCamelCase__ ) ), randrange(len(lowerCamelCase__ ) )
__UpperCAmelCase : Optional[int] = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
__UpperCAmelCase , __UpperCAmelCase : List[str] = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def _lowercase ( lowerCamelCase__ = 100 ) -> Any:
"""simple docstring"""
return (generate_random_hand() for _ in range(lowerCamelCase__ ))
@pytest.mark.parametrize("hand, expected" , lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
assert PokerHand(lowerCamelCase__ )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
assert PokerHand(lowerCamelCase__ )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = PokerHand(lowerCamelCase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
assert PokerHand(lowerCamelCase__ )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
assert PokerHand(lowerCamelCase__ )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , lowerCamelCase__ )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
assert PokerHand(lowerCamelCase__ ).compare_with(PokerHand(lowerCamelCase__ ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
assert PokerHand(lowerCamelCase__ ).compare_with(PokerHand(lowerCamelCase__ ) ) == expected
def _lowercase ( ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = [PokerHand(lowerCamelCase__ ) for hand in SORTED_HANDS]
__UpperCAmelCase : List[Any] = poker_hands.copy()
shuffle(lowerCamelCase__ )
__UpperCAmelCase : Any = chain(sorted(lowerCamelCase__ ) )
for index, hand in enumerate(lowerCamelCase__ ):
assert hand == poker_hands[index]
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : List[str] = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )]
pokerhands.sort(reverse=lowerCamelCase__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def _lowercase ( ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Tuple = PokerHand("2C 4S AS 3D 5C" )
__UpperCAmelCase : int = True
__UpperCAmelCase : Optional[Any] = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def _lowercase ( ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : List[Any] = os.path.abspath(os.path.dirname(lowerCamelCase__ ) )
__UpperCAmelCase : Dict = os.path.join(lowerCamelCase__ , "poker_hands.txt" )
with open(lowerCamelCase__ ) as file_hand:
for line in file_hand:
__UpperCAmelCase : Union[str, Any] = line[:14].strip()
__UpperCAmelCase : Optional[int] = line[15:].strip()
__UpperCAmelCase , __UpperCAmelCase : Tuple = PokerHand(lowerCamelCase__ ), PokerHand(lowerCamelCase__ )
__UpperCAmelCase : str = player.compare_with(lowerCamelCase__ )
if output == "Win":
answer += 1
assert answer == 376
| 701 | '''simple docstring'''
class __A :
def __init__( self , UpperCamelCase_ ):
__UpperCAmelCase : Any = set_counts
__UpperCAmelCase : int = max(UpperCamelCase_ )
__UpperCAmelCase : List[str] = len(UpperCamelCase_ )
__UpperCAmelCase : Any = [1] * num_sets
__UpperCAmelCase : Any = list(range(UpperCamelCase_ ) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Optional[int] = self.get_parent(UpperCamelCase_ )
__UpperCAmelCase : List[Any] = self.get_parent(UpperCamelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : List[Any] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__UpperCAmelCase : Union[str, Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : Dict = src_parent
__UpperCAmelCase : Dict = self.set_counts[src_parent]
__UpperCAmelCase : Dict = max(self.max_set , UpperCamelCase_ )
return True
def _snake_case ( self , UpperCamelCase_ ):
if self.parents[disj_set] == disj_set:
return disj_set
__UpperCAmelCase : str = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
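# Usage sketch (the merge method name is assumed; `get_parent` appears above):
#   ds = DisjointSet([1, 1, 2])  # hypothetical name for the class above
#   ds.merge(0, 1)               # union-by-rank joins the two singletons
#   ds.merge(1, 2)               # joined component of size 4 now dominates
#   ds.max_set                   # -> 4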
| 10 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads=32,
        attention_head_dim=64,
        num_layers=20,
        embedding_dim=768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout=0.0,
        time_embed_act_fn="silu",
        norm_in_type=None,
        embedding_proj_norm_type=None,
        encoder_hid_proj_type="linear",
        added_emb_type="prd",
        time_embed_dim=None,
        embedding_proj_dim=None,
        clip_embed_dim=None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")
        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self):
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep,
        proj_embedding,
        encoder_hidden_states=None,
        attention_mask=None,
        return_dict=True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 702 | '''simple docstring'''
def method_a(boundary, steps) -> float:
    """Approximate the integral of f over `boundary` using the trapezoidal rule."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ..., b - h."""
    x = a + h
    while x < (b - h / 2):  # h/2 tolerance keeps the last interior point despite floating-point drift
        yield x
        x = x + h


def f(x) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
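# A quick convergence check (added for illustration): with f(x) = x**2 on [0, 1]
# the exact integral is 1/3, and the trapezoidal estimate's error should shrink
# roughly quadratically as the step count grows.
def check_convergence() -> None:
    exact = 1.0 / 3.0
    for steps in (10.0, 100.0, 1000.0):
        approx = method_a([0.0, 1.0], steps)
        print(f"steps={steps:7.0f}  approx={approx:.6f}  error={abs(approx - exact):.2e}")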
| 10 | 0 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a WMT translation pair with `datasets` and save it as .source/.target text files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")
    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
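# Example invocation (illustrative; assumes this script is saved as download_wmt.py --
# fire maps the CLI flags onto the keyword arguments above):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
#
# This writes {train,val,test}.source and .target files under wmt16-ro-en/.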
| 703 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = ["ViTFeatureExtractor"]
_a : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
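# Illustration (not part of the module) of what the _LazyModule wiring buys:
# importing the package stays cheap, and a heavy backend is only imported when
# the corresponding attribute is first touched.
#
#   import transformers
#   config = transformers.ViTConfig()        # only the config module is loaded
#   model = transformers.ViTModel(config)    # the torch-backed module is imported here, on first use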
| 10 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
def __init__( self , UpperCamelCase_ , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=10 , UpperCamelCase_=3 , UpperCamelCase_=32 * 8 , UpperCamelCase_=32 * 8 , UpperCamelCase_=4 , UpperCamelCase_=64 , ):
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : str = use_auxiliary_loss
__UpperCAmelCase : Optional[int] = num_queries
__UpperCAmelCase : Union[str, Any] = num_channels
__UpperCAmelCase : Optional[Any] = min_size
__UpperCAmelCase : Union[str, Any] = max_size
__UpperCAmelCase : int = num_labels
__UpperCAmelCase : Dict = hidden_dim
__UpperCAmelCase : Optional[int] = hidden_dim
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
__UpperCAmelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_a )
__UpperCAmelCase : Any = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_a ) > 0.5
).float()
__UpperCAmelCase : List[str] = (torch.rand((self.batch_size, self.num_labels) , device=_a ) > 0.5).long()
__UpperCAmelCase : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _snake_case ( self ):
__UpperCAmelCase : List[str] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__UpperCAmelCase : Optional[Any] = self.num_queries
__UpperCAmelCase : Any = self.num_labels
__UpperCAmelCase : Dict = [1, 1, 1, 1]
__UpperCAmelCase : str = self.num_channels
__UpperCAmelCase : List[Any] = 64
__UpperCAmelCase : Optional[Any] = 1_28
__UpperCAmelCase : List[str] = self.hidden_dim
__UpperCAmelCase : str = self.hidden_dim
__UpperCAmelCase : List[Any] = self.hidden_dim
return config
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__UpperCAmelCase : int = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : int = output.encoder_hidden_states
__UpperCAmelCase : Tuple = output.pixel_decoder_hidden_states
__UpperCAmelCase : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , config.decoder_layers )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
with torch.no_grad():
__UpperCAmelCase : Optional[Any] = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
__UpperCAmelCase : Dict = model(pixel_values=_a , pixel_mask=_a )
__UpperCAmelCase : str = model(_a , output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a , _a )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(UpperCamelCase_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(pixel_values=_a , pixel_mask=_a )
__UpperCAmelCase : Dict = model(_a )
comm_check_on_output(_a )
__UpperCAmelCase : Union[str, Any] = model(
pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_mismatched_shapes = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def _snake_case ( self ):
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _snake_case ( self ):
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _snake_case ( self ):
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _snake_case ( self ):
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _snake_case ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _snake_case ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self ):
pass
def _snake_case ( self ):
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = model_class(_a )
__UpperCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[str] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
@slow
def _snake_case ( self ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__UpperCAmelCase : str = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = (self.model_tester.min_size,) * 2
__UpperCAmelCase : Any = {
"""pixel_values""": torch.randn((2, 3, *size) , device=_a ),
"""mask_labels""": torch.randn((2, 10, *size) , device=_a ),
"""class_labels""": torch.zeros(2 , 10 , device=_a ).long(),
}
__UpperCAmelCase : Optional[Any] = self.model_tester.get_config()
__UpperCAmelCase : List[str] = MaskaFormerForUniversalSegmentation(_a ).to(_a )
__UpperCAmelCase : int = model(**_a )
self.assertTrue(outputs.loss is not None )
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def _snake_case ( self ):
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(_a ).to(_a )
__UpperCAmelCase : Optional[int] = model(**_a , output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def _snake_case ( self ):
if not self.model_tester.is_training:
return
__UpperCAmelCase : List[Any] = self.all_model_classes[1]
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase : Tuple = model_class(_a )
model.to(_a )
model.train()
__UpperCAmelCase : Dict = model(_a , mask_labels=_a , class_labels=_a ).loss
loss.backward()
def _snake_case ( self ):
__UpperCAmelCase : Dict = self.all_model_classes[1]
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase : int = True
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Union[str, Any] = model_class(_a ).to(_a )
model.train()
__UpperCAmelCase : Dict = model(_a , mask_labels=_a , class_labels=_a )
__UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__UpperCAmelCase : List[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__UpperCAmelCase : int = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__UpperCAmelCase : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_a : Optional[int] = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def _snake_case ( self ):
__UpperCAmelCase : Any = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
__UpperCAmelCase : Dict = self.default_image_processor
__UpperCAmelCase : Optional[int] = prepare_img()
__UpperCAmelCase : Union[str, Any] = image_processor(_a , return_tensors="pt" ).to(_a )
__UpperCAmelCase : str = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**_a )
__UpperCAmelCase : Tuple = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
__UpperCAmelCase : str = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) )
def _snake_case ( self ):
__UpperCAmelCase : Dict = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
__UpperCAmelCase : str = self.default_image_processor
__UpperCAmelCase : str = prepare_img()
__UpperCAmelCase : str = image_processor(_a , return_tensors="pt" ).to(_a )
__UpperCAmelCase : Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 3_84, 3_84) )
with torch.no_grad():
__UpperCAmelCase : str = model(**_a )
# masks_queries_logits
__UpperCAmelCase : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
__UpperCAmelCase : Any = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
__UpperCAmelCase : List[str] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__UpperCAmelCase : Dict = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
def _snake_case ( self ):
__UpperCAmelCase : List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
__UpperCAmelCase : Optional[int] = self.default_image_processor
__UpperCAmelCase : Tuple = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
__UpperCAmelCase : Optional[int] = inputs["""pixel_values"""].to(_a )
__UpperCAmelCase : int = [el.to(_a ) for el in inputs["""mask_labels"""]]
__UpperCAmelCase : Any = [el.to(_a ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**_a )
self.assertTrue(outputs.loss is not None )
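# End-to-end sketch mirroring the integration tests above (added for illustration;
# it reuses the same public checkpoint and assumes the MaskaFormer* class names
# from this file's imports, plus the image processor's instance post-processing API):
#
#   import torch
#   from PIL import Image
#   from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerImageProcessor
#
#   ckpt = "facebook/mask2former-swin-small-coco-instance"
#   processor = MaskaFormerImageProcessor.from_pretrained(ckpt)
#   model = MaskaFormerForUniversalSegmentation.from_pretrained(ckpt).eval()
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   inputs = processor(image, return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   result = processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])[0]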
| 704 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
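# Usage sketch (added for illustration; assumes the public xlm-roberta-base
# checkpoint). It shows the <s>/</s> wrapping from build_inputs_with_special_tokens
# and the +1 fairseq id offset handled in _convert_token_to_id.
#
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   ids = tok("Hello world!")["input_ids"]     # begins with 0 (<s>) and ends with 2 (</s>)
#   print(tok.convert_ids_to_tokens(ids))
#   print(tok.decode(ids, skip_special_tokens=True))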
| 10 | 0 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        truncate_before_pattern=None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
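# Usage sketch of the decode-time truncation above (added for illustration; the
# checkpoint comes from the pretrained map, `generated_ids` is a placeholder for
# real model output, and the regex list mirrors the documented CodeGen defaults):
#
#   tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tok.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])
#
# The completion is cut at the first matching pattern, and truncate() additionally
# stops at a second top-level `print` or `def`, trimming run-on generations.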
| 705 | '''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
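# How these criteria plug into generation (added for illustration; "gpt2" is just
# a small stand-in checkpoint):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   from transformers.generation import MaxTimeCriteria, StoppingCriteriaList
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok("Once upon a time", return_tensors="pt")
#   criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)])
#   out = model.generate(**inputs, stopping_criteria=criteria, max_length=100)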
| 10 | 0 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo, pytorch_dump_folder_path):
    """Copy/paste/tweak the original roberta_prelayernorm checkpoint into the transformers design."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
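# Example invocation (illustrative; assumes this script is saved as
# convert_roberta_prelayernorm_checkpoint.py -- the repo id matches the help text):
#
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-m0.40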
| 706 | '''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop("add_bos_token" , UpperCamelCase_ ):
            model_id = kwargs.pop("name_or_path", "")
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space:
__UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) )
__UpperCAmelCase : Optional[int] = add_prefix_space
__UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = add_prefix_space
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : str = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
__UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
__UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : List[Any] = completion[: prints[1].start()]
__UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()]
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Dict = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
| 10 | 0 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
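# Sketch of driving the scheduler with explicit timesteps, mirroring the
# multistep test above (added for illustration; the zero residual stands in for
# a real denoiser call):
#
#   import torch
#   from diffusers import CMStochasticIterativeScheduler
#
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#   scheduler.set_timesteps(timesteps=[106, 0])     # must be descending, per the validation tests
#   sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       residual = torch.zeros_like(model_input)    # placeholder for model(model_input, t)
#       sample = scheduler.step(residual, t, sample).prev_sample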
| 707 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Map every byte 0-255 to a printable unicode character so byte-level BPE never sees whitespace/control codes."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (represented as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __A (__magic_name__ ):
snake_case :Optional[int] = VOCAB_FILES_NAMES
snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
__UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
__UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : int = json.load(UpperCamelCase_ )
__UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Any = errors # how to handle errors in decoding
__UpperCAmelCase : str = bytes_to_unicode()
__UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[str] = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = 0
while i < len(UpperCamelCase_ ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : str = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__UpperCAmelCase : int = get_pairs(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ )
__UpperCAmelCase : Dict = word
return word
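    # Worked sketch of the merge loop above (illustrative, assuming the pair
    # ("l", "o") carries the lowest rank): ("h", "e", "l", "l", "o") becomes
    # ("h", "e", "l", "lo"); merging repeats until no remaining pair appears in
    # bpe_ranks, then the surviving symbols are joined with spaces and cached.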
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = []
for token in re.findall(self.pat , UpperCamelCase_ ):
__UpperCAmelCase : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) )
return bpe_tokens
def _snake_case ( self , UpperCamelCase_ ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , UpperCamelCase_ ):
return self.decoder.get(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = "".join(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : Any = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" )
__UpperCAmelCase : str = 0
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : str = token_index
writer.write(" ".join(UpperCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
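    # Illustrative layout (added note): for a sequence pair (A, B) the method
    # above emits <s> A </s></s> B </s>, BART's two-separator pair format,
    # while a single sequence becomes <s> A </s>.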
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ):
__UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Tuple = " " + text
return (text, kwargs)
| 10 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
_a : Dict = trt.Logger(trt.Logger.WARNING)
_a : Optional[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
_a : str = logging.getLogger(__name__)
_a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="The number of processes to use for the preprocessing."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
_a : Optional[int] = parser.parse_args()
if args.tokenizer_name:
_a : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )
logger.info("Training/evaluation parameters %s", args)
_a : Tuple = args.per_device_eval_batch_size
_a : Dict = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
_a : Dict = True
_a : List[Any] = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    _a : Optional[int] = '''temp_engine/bert-fp16.engine'''
if args.int8:
    _a : List[str] = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
_a : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
_a : List[str] = [network.get_input(i) for i in range(network.num_inputs)]
_a : str = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
_a : str = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
_a : Optional[int] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
_a : int = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
    __UpperCAmelCase : List[Any] = np.asarray(inputs["input_ids"] , dtype=np.int32 )
    __UpperCAmelCase : Any = np.asarray(inputs["attention_mask"] , dtype=np.int32 )
    __UpperCAmelCase : Optional[Any] = np.asarray(inputs["token_type_ids"] , dtype=np.int32 )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _lowercase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _lowercase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _lowercase )
# start time
__UpperCAmelCase : Optional[int] = time.time()
# Run inference
context.execute_async(
bindings=[int(_lowercase ) for d_inp in d_inputs] + [int(_lowercase ), int(_lowercase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_lowercase , _lowercase , _lowercase )
cuda.memcpy_dtoh_async(_lowercase , _lowercase , _lowercase )
# Synchronize the stream and take time
stream.synchronize()
# end time
__UpperCAmelCase : str = time.time()
__UpperCAmelCase : Any = end_time - start_time
    __UpperCAmelCase : List[Any] = (h_output0, h_output1)
# print(outputs)
return outputs, infer_time
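# Note on the pattern above (added comment): inputs are staged host-to-device,
# execute_async enqueues the engine run, the two logit buffers are copied back
# device-to-host, and stream.synchronize() blocks so the host timestamps
# bracket the full transfer-plus-compute time on the CUDA stream.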
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
_a : int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_a : int = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
_a : Dict = raw_datasets['''validation'''].column_names
_a : Tuple = '''question''' if '''question''' in column_names else column_names[0]
_a : Dict = '''context''' if '''context''' in column_names else column_names[1]
_a : Any = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
_a : Any = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
_a : int = min(args.max_seq_length, tokenizer.model_max_length)
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : List[Any] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
__UpperCAmelCase : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="only_second" if pad_on_right else "only_first" , max_length=_lowercase , stride=args.doc_stride , return_overflowing_tokens=_lowercase , return_offsets_mapping=_lowercase , padding="max_length" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__UpperCAmelCase : List[str] = tokenized_examples.pop("overflow_to_sample_mapping" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__UpperCAmelCase : str = []
for i in range(len(tokenized_examples["input_ids"] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__UpperCAmelCase : Tuple = tokenized_examples.sequence_ids(_lowercase )
__UpperCAmelCase : Optional[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__UpperCAmelCase : Dict = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__UpperCAmelCase : Any = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i] )
]
return tokenized_examples
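# Illustrative effect of the stride logic above (added note): with
# max_seq_length=384 and doc_stride=128, a context of roughly 1,500 tokens is
# split into several overlapping features, each traced back to its source
# example through overflow_to_sample_mapping so predictions can be regrouped.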
_a : Union[str, Any] = raw_datasets['''validation''']
# Validation Feature Creation
_a : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
_a : Optional[int] = default_data_collator
_a : Tuple = eval_dataset.remove_columns(["example_id", "offset_mapping"])
_a : Optional[Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="eval" ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = postprocess_qa_predictions(
examples=_lowercase , features=_lowercase , predictions=_lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowercase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__UpperCAmelCase : Optional[Any] = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
__UpperCAmelCase : List[str] = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
__UpperCAmelCase : Optional[Any] = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_lowercase , label_ids=_lowercase )
_a : Union[str, Any] = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
return trt.volume(engine.get_binding_shape(_lowercase ) ) * engine.get_binding_dtype(_lowercase ).itemsize
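    # Illustrative arithmetic (assumed shapes, not from the original): a
    # binding of shape (batch=8, seq=384) holding 4-byte elements needs
    # trt.volume((8, 384)) * 4 = 12,288 bytes of device memory.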
# Allocate device memory for inputs and outputs.
_a : Dict = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    _a : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    _a : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    _a : List[Any] = cuda.mem_alloc(h_output0.nbytes)
    _a : List[str] = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
_a : int = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
_a : str = 0.0
_a : List[Any] = 0
_a : Any = timeit.default_timer()
_a : Optional[int] = None
for step, batch in enumerate(eval_dataloader):
        _a : Optional[int] = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
_a : Any = outputs
_a : Tuple = torch.tensor(start_logits)
_a : Union[str, Any] = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
_a : int = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
_a : List[str] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
_a : Tuple = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
_a : str = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
_a : Any = nested_truncate(all_preds, len(eval_dataset))
_a : Union[str, Any] = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
logger.info("Total Number of Inference = %d", niter)
_a : Union[str, Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
_a : int = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 708 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : int = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __A (__magic_name__ ):
snake_case :Optional[int] = "speech_to_text_2"
snake_case :List[Any] = ["past_key_values"]
snake_case :str = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCamelCase_=1_00_00 , UpperCamelCase_=6 , UpperCamelCase_=20_48 , UpperCamelCase_=4 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=10_24 , **UpperCamelCase_ , ):
__UpperCAmelCase : Any = vocab_size
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Tuple = decoder_ffn_dim
__UpperCAmelCase : List[str] = decoder_layers
__UpperCAmelCase : str = decoder_attention_heads
__UpperCAmelCase : Dict = dropout
__UpperCAmelCase : Optional[Any] = attention_dropout
__UpperCAmelCase : int = activation_dropout
__UpperCAmelCase : Dict = activation_function
__UpperCAmelCase : Tuple = init_std
__UpperCAmelCase : Any = decoder_layerdrop
__UpperCAmelCase : str = use_cache
__UpperCAmelCase : int = decoder_layers
__UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCAmelCase : Union[str, Any] = max_target_positions
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
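# Hedged usage sketch (added; assumes the original public name
# Speech2Text2Config for the class above): instantiating it with the defaults
# mirrors the decoder layout of facebook/s2t-wav2vec2-large-en-de, e.g.
# config = Speech2Text2Config(vocab_size=10000, d_model=256, decoder_layers=6)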
| 10 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
    if (
        (cp >= 0X4_e_0_0 and cp <= 0X9_f_f_f)  # CJK Unified Ideographs
        or (cp >= 0X3_4_0_0 and cp <= 0X4_d_b_f)  # CJK Unified Ideographs Extension A
        or (cp >= 0X2_0_0_0_0 and cp <= 0X2_a_6_d_f)  # CJK Unified Ideographs Extension B
        or (cp >= 0X2_a_7_0_0 and cp <= 0X2_b_7_3_f)  # CJK Unified Ideographs Extension C
        or (cp >= 0X2_b_7_4_0 and cp <= 0X2_b_8_1_f)  # CJK Unified Ideographs Extension D
        or (cp >= 0X2_b_8_2_0 and cp <= 0X2_c_e_a_f)  # CJK Unified Ideographs Extension E
        or (cp >= 0Xf_9_0_0 and cp <= 0Xf_a_f_f)  # CJK Compatibility Ideographs
        or (cp >= 0X2_f_8_0_0 and cp <= 0X2_f_a_1_f)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
for char in word:
__UpperCAmelCase : Any = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
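# Illustrative behaviour (added sketch, using the call-site name is_chinese):
# is_chinese("中国") -> 1, while is_chinese("中a") -> 0, since every character
# must fall inside one of the CJK ranges checked above.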
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = set()
for token in tokens:
__UpperCAmelCase : Any = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
__UpperCAmelCase : List[str] = list(UpperCAmelCase__ )
return word_list
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
__UpperCAmelCase : Union[str, Any] = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
__UpperCAmelCase : List[str] = bert_tokens
__UpperCAmelCase , __UpperCAmelCase : Any = 0, len(UpperCAmelCase__ )
while start < end:
__UpperCAmelCase : Any = True
if is_chinese(bert_word[start] ):
__UpperCAmelCase : Union[str, Any] = min(end - start , UpperCAmelCase__ )
for i in range(UpperCAmelCase__ , 1 , -1 ):
__UpperCAmelCase : Optional[int] = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__UpperCAmelCase : int = "##" + bert_word[j]
__UpperCAmelCase : List[Any] = start + i
__UpperCAmelCase : Union[str, Any] = False
break
if single_word:
start += 1
return bert_word
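# Worked sketch of the whole-word marking above (illustrative): with
# chinese_word_set = {"中国"}, the tokens ["中", "国", "人"] are rewritten to
# ["中", "##国", "人"], so only the continuation piece of a matched whole word
# gains the "##" prefix.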
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = []
for i in range(0 , len(UpperCAmelCase__ ) , 100 ):
__UpperCAmelCase : str = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws
__UpperCAmelCase : int = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
__UpperCAmelCase : Union[str, Any] = []
for i in range(0 , len(UpperCAmelCase__ ) , 100 ):
__UpperCAmelCase : List[str] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
__UpperCAmelCase : Any = []
for input_ids, chinese_word in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
__UpperCAmelCase : str = []
for id in input_ids:
__UpperCAmelCase : Optional[int] = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
__UpperCAmelCase : Any = add_sub_symbol(UpperCAmelCase__ , UpperCAmelCase__ )
__UpperCAmelCase : List[str] = []
    # We only save the positions of Chinese subwords that start with "##", which means they are part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
__UpperCAmelCase : int = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
with open(args.file_name , "r" , encoding="utf-8" ) as f:
__UpperCAmelCase : List[Any] = f.readlines()
__UpperCAmelCase : str = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    __UpperCAmelCase : Union[str, Any] = LTP(args.ltp ) # faster on a GPU device
__UpperCAmelCase : str = BertTokenizer.from_pretrained(args.bert )
__UpperCAmelCase : int = prepare_ref(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
__UpperCAmelCase : Tuple = [json.dumps(UpperCAmelCase__ ) + "\n" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
_a : List[str] = parser.parse_args()
main(args)
| 709 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ = 100 ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = (n * (n + 1) // 2) ** 2
__UpperCAmelCase : Any = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
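# Added cross-check (illustrative, not in the original): the closed form above
# matches the brute-force difference between the square of the sum and the sum
# of the squares; for n = 10 both give 55**2 - 385 = 2640.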
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
_a : Tuple = "src/transformers"
# Matches is_xxx_available()
_a : Tuple = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_a : List[str] = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_a : Tuple = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_a : Optional[int] = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_a : Optional[Any] = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_a : Any = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_a : Any = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_a : Optional[int] = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_a : Union[str, Any] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_a : Union[str, Any] = re.compile(R"^\s*try:")
# Catches a line with else:
_a : List[Any] = re.compile(R"^\s*else:")
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
if _re_test_backend.search(_snake_case ) is None:
return None
__UpperCAmelCase : Union[str, Any] = [b[0] for b in _re_backend.findall(_snake_case )]
backends.sort()
return "_and_".join(_snake_case )
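# Illustrative behaviour (added sketch): a gated line such as
#     if not is_torch_available() and not is_vision_available():
# makes find_backend return "torch_and_vision", the key used for that block in
# both halves of the init.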
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
with open(_snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
__UpperCAmelCase : List[Any] = f.readlines()
__UpperCAmelCase : Dict = 0
while line_index < len(_snake_case ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
__UpperCAmelCase : List[str] = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
__UpperCAmelCase : Union[str, Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_snake_case ):
__UpperCAmelCase : List[Any] = _re_one_line_import_struct.search(_snake_case ).groups()[0]
            __UpperCAmelCase : Dict = re.findall(R"\[([^\]]+)\]" , _snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
__UpperCAmelCase : int = _re_import_struct_key_value.search(_snake_case )
if single_line_import_search is not None:
__UpperCAmelCase : Dict = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
__UpperCAmelCase : Dict = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
__UpperCAmelCase : Union[str, Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__UpperCAmelCase : Optional[int] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__UpperCAmelCase : int = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
__UpperCAmelCase : str = lines[line_index]
if _re_import_struct_add_one.search(_snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(_snake_case ) is not None:
__UpperCAmelCase : Dict = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(", " )
__UpperCAmelCase : Optional[int] = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_between_brackets.search(_snake_case ) is not None:
__UpperCAmelCase : int = _re_between_brackets.search(_snake_case ).groups()[0].split(", " )
__UpperCAmelCase : Tuple = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_quote_object.search(_snake_case ) is not None:
objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
__UpperCAmelCase : Optional[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__UpperCAmelCase : str = []
while (
line_index < len(_snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
__UpperCAmelCase : int = lines[line_index]
__UpperCAmelCase : Union[str, Any] = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
__UpperCAmelCase : List[str] = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(_snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
__UpperCAmelCase : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__UpperCAmelCase : Optional[int] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__UpperCAmelCase : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
__UpperCAmelCase : Tuple = lines[line_index]
__UpperCAmelCase : Any = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
__UpperCAmelCase : Dict = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
def find_duplicates(lowerCamelCase__ ):
return [k for k, v in collections.Counter(_snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__UpperCAmelCase : Optional[Any] = []
for key in import_dict_objects.keys():
__UpperCAmelCase : Union[str, Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
__UpperCAmelCase : str = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__UpperCAmelCase : List[Any] = "base imports" if key == "none" else f"""{key} backend"""
errors.append(f"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Any = []
for root, _, files in os.walk(_snake_case ):
if "__init__.py" in files:
__UpperCAmelCase : Optional[int] = os.path.join(_snake_case , "__init__.py" )
__UpperCAmelCase : Any = parse_init(_snake_case )
if objects is not None:
__UpperCAmelCase : int = analyze_results(*_snake_case )
if len(_snake_case ) > 0:
__UpperCAmelCase : Optional[Any] = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("\n".join(_snake_case ) )
if len(_snake_case ) > 0:
raise ValueError("\n\n".join(_snake_case ) )
def _lowercase ( ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = []
for path, directories, files in os.walk(_snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(_snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_snake_case ) / folder).glob("*.py" ) ) ) == 0:
continue
__UpperCAmelCase : str = str((Path(_snake_case ) / folder).relative_to(_snake_case ) )
__UpperCAmelCase : List[str] = short_path.replace(os.path.sep , "." )
submodules.append(_snake_case )
for fname in files:
if fname == "__init__.py":
continue
__UpperCAmelCase : Any = str((Path(_snake_case ) / fname).relative_to(_snake_case ) )
__UpperCAmelCase : int = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(_snake_case )
return submodules
_a : int = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def _lowercase ( ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : int = importlib.util.spec_from_file_location(
"transformers" , os.path.join(_snake_case , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__UpperCAmelCase : str = spec.loader.load_module()
__UpperCAmelCase : Optional[int] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_snake_case ) > 0:
__UpperCAmelCase : List[Any] = "\n".join(f"""- {module}""" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
f"""{list_of_modules}\n"""
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 710 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
__UpperCAmelCase : Tuple = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
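# Hedged usage sketch (added; illustrative values, assuming the original
# signature present_value(discount_rate, cash_flows)): an outlay of 1,000 at
# t=0 followed by three inflows of 500, discounted at 10%, yields
# present_value(0.10, [-1000, 500, 500, 500]) == 243.43.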
| 10 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_a : Optional[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_a : Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : List[str] = state_dict.pop(_lowercase )
__UpperCAmelCase : str = val
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : List[str] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__UpperCAmelCase : List[str] = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
__UpperCAmelCase : str = value
else:
__UpperCAmelCase : str = value
return new_state_dict
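# Example of the remapping above (illustrative key): the checkpoint entry
# "backbone.0.body.layer1.0.conv1.weight" is rewritten to
# "backbone.conv_encoder.model.layer1.0.conv1.weight", the attribute path used
# by the HuggingFace implementation.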
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__UpperCAmelCase : Dict = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
__UpperCAmelCase : List[str] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : Tuple = in_proj_weight[:256, :]
__UpperCAmelCase : int = in_proj_bias[:256]
__UpperCAmelCase : Optional[Any] = in_proj_weight[256:512, :]
__UpperCAmelCase : List[str] = in_proj_bias[256:512]
__UpperCAmelCase : Tuple = in_proj_weight[-256:, :]
__UpperCAmelCase : Optional[Any] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__UpperCAmelCase : Optional[int] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
__UpperCAmelCase : Dict = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : str = in_proj_weight[:256, :]
__UpperCAmelCase : List[str] = in_proj_bias[:256]
__UpperCAmelCase : Any = in_proj_weight[256:512, :]
__UpperCAmelCase : Any = in_proj_bias[256:512]
__UpperCAmelCase : List[Any] = in_proj_weight[-256:, :]
__UpperCAmelCase : List[str] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
__UpperCAmelCase : Any = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
__UpperCAmelCase : List[str] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__UpperCAmelCase : Tuple = in_proj_weight_cross_attn[:256, :]
__UpperCAmelCase : Optional[Any] = in_proj_bias_cross_attn[:256]
__UpperCAmelCase : Dict = in_proj_weight_cross_attn[256:512, :]
__UpperCAmelCase : Tuple = in_proj_bias_cross_attn[256:512]
__UpperCAmelCase : Optional[int] = in_proj_weight_cross_attn[-256:, :]
__UpperCAmelCase : Dict = in_proj_bias_cross_attn[-256:]
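# Note on the slicing above (added comment): each fused in_proj matrix has
# shape (3 * 256, 256) for the model's 256-dim hidden size, so rows [:256],
# [256:512] and [-256:] split it into the separate query, key and value
# projections.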
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
__UpperCAmelCase : List[str] = image.size
__UpperCAmelCase : Tuple = max(_lowercase , _lowercase )
__UpperCAmelCase : Tuple = 800 if '''detection''' in checkpoint_url else 1000
__UpperCAmelCase : str = target_max_size / current_max_size
__UpperCAmelCase : List[str] = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
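# Illustrative arithmetic (added): a 2000x1000 detection image gets
# scale = 800 / 2000 = 0.4 and is resized to 800x400, capping the longer side
# at 800 (1000 for structure-recognition checkpoints).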
def _lowercase ( lowerCamelCase__ ) -> Dict:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = F.to_tensor(_lowercase )
__UpperCAmelCase : str = F.normalize(_lowercase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
logger.info("Converting model..." )
# load original state dict
__UpperCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(_lowercase , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
__UpperCAmelCase : List[Any] = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__UpperCAmelCase : Any = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
__UpperCAmelCase : int = state_dict.pop(_lowercase )
__UpperCAmelCase : int = val
# create HuggingFace model and load state dict
__UpperCAmelCase : Optional[int] = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
__UpperCAmelCase : Optional[Any] = 15
__UpperCAmelCase : int = 2
__UpperCAmelCase : Optional[int] = {0: '''table''', 1: '''table rotated'''}
__UpperCAmelCase : Dict = idalabel
__UpperCAmelCase : int = {v: k for k, v in idalabel.items()}
else:
__UpperCAmelCase : List[str] = 125
__UpperCAmelCase : Optional[int] = 6
__UpperCAmelCase : Union[str, Any] = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
__UpperCAmelCase : Optional[int] = idalabel
__UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : Union[str, Any] = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1000 )
__UpperCAmelCase : int = TableTransformerForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# verify our conversion
__UpperCAmelCase : List[Any] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
__UpperCAmelCase : int = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=_lowercase )
__UpperCAmelCase : List[Any] = Image.open(_lowercase ).convert("RGB" )
__UpperCAmelCase : List[str] = normalize(resize(_lowercase , _lowercase ) ).unsqueeze(0 )
__UpperCAmelCase : List[Any] = model(_lowercase )
if "detection" in checkpoint_url:
__UpperCAmelCase : Dict = (1, 15, 3)
__UpperCAmelCase : Optional[Any] = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
__UpperCAmelCase : Optional[int] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
__UpperCAmelCase : int = (1, 125, 7)
__UpperCAmelCase : Tuple = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
__UpperCAmelCase : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , _lowercase , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , _lowercase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
__UpperCAmelCase : Dict = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(_lowercase )
image_processor.push_to_hub(_lowercase )
if __name__ == "__main__":
_a : str = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
    help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_a : Optional[int] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 711 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
_a : Union[str, Any] = HfApi()
_a : int = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"""Started running {mod.modelId}!!!""")
        if mod.modelId.startswith("CompVis"):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args( ):
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def main( ):
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
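    # Example invocation (a sketch; the training script and its flags are illustrative):
    #   python xla_spawn.py --num_cores 8 run_glue.py --learning_rate 5e-5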
| 712 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (__magic_name__ ):
snake_case :Any = "cvt"
def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_sizes
__UpperCAmelCase : List[str] = patch_stride
__UpperCAmelCase : Tuple = patch_padding
__UpperCAmelCase : int = embed_dim
__UpperCAmelCase : str = num_heads
__UpperCAmelCase : Any = depth
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : List[str] = attention_drop_rate
__UpperCAmelCase : Dict = drop_rate
__UpperCAmelCase : Dict = drop_path_rate
__UpperCAmelCase : str = qkv_bias
__UpperCAmelCase : Optional[int] = cls_token
__UpperCAmelCase : Optional[Any] = qkv_projection_method
__UpperCAmelCase : Tuple = kernel_qkv
__UpperCAmelCase : Optional[Any] = padding_kv
__UpperCAmelCase : Optional[int] = stride_kv
__UpperCAmelCase : Any = padding_q
__UpperCAmelCase : List[Any] = stride_q
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
| 10 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
    """simple docstring"""
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    # "large" is a substring of "xlarge", so this branch deliberately runs last
    # and overrides the values set by the "large" branch above
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys( config ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
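# For example, rename_key(state_dict, "decode_head.conv_seg.weight", "decode_head.classifier.weight")
# pops the tensor stored under the old key and re-inserts it under the new one.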
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    """simple docstring"""
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["state_dict"]
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("bn" , "batch_norm" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
    print("Logits:" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""" )
        model.push_to_hub(f"""openmmlab/{model_name}""" )
        processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
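    # Example invocation (a sketch; the script name is illustrative):
    #   python convert_upernet_checkpoint.py --model_name upernet-convnext-tiny \
    #       --pytorch_dump_folder_path ./upernet-convnext-tiny --push_to_hub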
| 713 | '''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix , constant_matrix , init_val , iterations , ) -> list[float]:
    """simple docstring"""
    rows1 , cols1 = coefficient_matrix.shape
    rows2 , cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg )
    if cols2 != 1:
        msg = f"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"""matrix but received {len(init_val )} and {rows1}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table ) -> bool:
    """simple docstring"""
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
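    # Small demonstration (values are illustrative, not from the original file):
    # the system 4x + y = 2, x + 3y = -6 is strictly diagonally dominant, so the
    # Jacobi iteration converges towards the exact solution x = 12/11, y = -26/11.
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[2.0], [-6.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], iterations=25))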
| 10 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=2 , UpperCamelCase_=24 , UpperCamelCase_=16 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=10 , UpperCamelCase_=0.0_2 , UpperCamelCase_=None , UpperCamelCase_=2 , UpperCamelCase_=2 , ):
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[int] = patch_size
__UpperCAmelCase : List[Any] = max_length
__UpperCAmelCase : Optional[Any] = num_mel_bins
__UpperCAmelCase : Union[str, Any] = is_training
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : Optional[int] = num_hidden_layers
__UpperCAmelCase : Tuple = num_attention_heads
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : Union[str, Any] = type_sequence_label_size
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : Union[str, Any] = frequency_stride
__UpperCAmelCase : int = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__UpperCAmelCase : int = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__UpperCAmelCase : Any = (self.max_length - self.patch_size) // self.time_stride + 1
__UpperCAmelCase : int = frequency_out_dimension * time_out_dimension
__UpperCAmelCase : Optional[int] = num_patches + 2
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
__UpperCAmelCase : int = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : str = self.get_config()
return config, input_values, labels
def _snake_case ( self ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Optional[int] = ASTModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : str = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_values , labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class __A (lowercase__ , lowercase__ , unittest.TestCase ):
snake_case :List[Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
snake_case :Dict = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
snake_case :Optional[Any] = False
snake_case :Union[str, Any] = False
snake_case :int = False
snake_case :List[str] = False
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _snake_case ( self ):
__UpperCAmelCase : int = ASTModelTester(self )
__UpperCAmelCase : Dict = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def _snake_case ( self ):
pass
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = model_class(__lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowercase )
__UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : str = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[Any] = ["input_values"]
self.assertListEqual(arg_names[:1] , __lowercase )
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
@slow
def _snake_case ( self ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[str] = ASTModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def prepare_audio( ):
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
    audio , sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class __A (unittest.TestCase ):
@cached_property
def _snake_case ( self ):
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def _snake_case ( self ):
__UpperCAmelCase : Tuple = self.default_feature_extractor
__UpperCAmelCase : Tuple = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(__lowercase )
__UpperCAmelCase : Tuple = self.default_feature_extractor
__UpperCAmelCase , __UpperCAmelCase : List[Any] = prepare_audio()
__UpperCAmelCase : List[str] = audio.squeeze().numpy()
__UpperCAmelCase : List[Any] = feature_extractor(__lowercase , sampling_rate=__lowercase , return_tensors="pt" ).to(__lowercase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : Dict = model(**__lowercase )
# verify the logits
__UpperCAmelCase : Optional[Any] = torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape , __lowercase )
__UpperCAmelCase : str = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
| 714 | '''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius( number ) -> int:
    """simple docstring"""
    factors = prime_factors(number )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
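    # Quick sanity checks (illustrative, assuming the helpers behave as named):
    print(mobius(4))  # 0: 4 = 2 * 2 is not square-free
    print(mobius(6))  # 1: 6 = 2 * 3 has an even number of prime factors
    print(mobius(7))  # -1: a prime has an odd number of prime factors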
| 10 | 0 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution ):
    def __init__( self , base_distribution , loc=None , scale=None , event_dim=0 ):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean( self ):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self ):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self ):
        return self.variance.sqrt()
class ParameterProjection(nn.Module ):
    def __init__( self , in_features , args_dim , domain_map , **kwargs ):
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self , x ):
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer(nn.Module ):
    def __init__( self , function ):
        super().__init__()
        self.function = function
    def forward( self , x , *args ):
        return self.function(x , *args )
class DistributionOutput:
    distribution_class: type
    in_support: int
    args_dim: Dict[str, int]
    def __init__( self , dim = 1 ):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc = None , scale = None , ):
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self ):
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self ):
        return len(self.event_shape )
    @property
    def value_in_support( self ):
        return 0.0
    def get_parameter_projection( self , in_features ):
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self , *args ):
        raise NotImplementedError()
    @staticmethod
    def squareplus( x ):
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput(DistributionOutput ):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
    @classmethod
    def domain_map( cls , df , loc , scale ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput(DistributionOutput ):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
    @classmethod
    def domain_map( cls , loc , scale ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput(DistributionOutput ):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
    @classmethod
    def domain_map( cls , total_count , logits ):
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution( self , distr_args ):
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution( self , distr_args , loc = None , scale = None ):
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
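# Note on the parameter maps above: squareplus(x) = (x + sqrt(x**2 + 4)) / 2 maps any
# real number to a strictly positive one, which is why it is applied to scale, df and
# total_count before they are handed to the torch distributions.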
| 715 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ['''MobileNetV2FeatureExtractor''']
    _import_structure["image_processing_mobilenet_v2"] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Any = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "ibert"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : str = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : Any = position_embedding_type
__UpperCAmelCase : Tuple = quant_mode
__UpperCAmelCase : Union[str, Any] = force_dequant
class __A (__magic_name__ ):
@property
def _snake_case ( self ):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 10 | 0 |
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __A (unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
def _snake_case ( self ):
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def _snake_case ( self ):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU" )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type="CPU" )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer , _ = create_optimizer(5E-5 , 10 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
            accumulator([gradient] )
        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
        @tf.function
        def accumulate(grada , gradb ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grada )
                local_variables[1].assign(gradb )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grada , gradb ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grada , tol=1E-2 )
            self.assertListAlmostEqual(values[1].value() , gradb , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 717 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main( ):
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
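    # Example invocation (a sketch; the flags are assumptions based on the usual
    # benchmark arguments):
    #   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128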
| 10 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_a : Optional[int] = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list ) -> bool:
    """simple docstring"""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
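# For example, check_same_shape([torch.zeros(2, 3), torch.ones(2, 3)]) is True,
# while mixing a (2, 3) tensor with a (3, 2) tensor yields False.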
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline
snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case :Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case :Any = frozenset([] )
snake_case :Optional[int] = True
@property
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[str] = (16, 16)
__UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
__UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" )
__UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , )
__UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ )
__UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith("mps" ):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__UpperCAmelCase : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
__UpperCAmelCase : List[str] = "cpu"
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
__UpperCAmelCase : Tuple = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
__UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def _snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _snake_case ( self ):
__UpperCAmelCase : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : List[str] = 2
__UpperCAmelCase : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # skip schedulers that are not supported by this pipeline
continue
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , scheduler_enum.name )
__UpperCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
__UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ )[0]
outputs.append(UpperCamelCase_ )
assert check_same_shape(UpperCamelCase_ )
@require_torch_gpu
@slow
class __A (unittest.TestCase ):
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = torch.manual_seed(33 )
__UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
__UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images
__UpperCAmelCase : int = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = torch.manual_seed(33 )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
__UpperCAmelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
__UpperCAmelCase : Dict = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 10 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
    'tokenizer_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}
SPIECE_UNDERLINE = '▁'
class __A (__lowercase ):
snake_case :Optional[Any] = VOCAB_FILES_NAMES
snake_case :Optional[int] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = RemBertTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_="[CLS]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="<unk>" , UpperCamelCase_="[SEP]" , UpperCamelCase_="<pad>" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , **UpperCamelCase_ , ):
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Dict = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
__a , tokenizer_file=__a , do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , **__a , )
__UpperCAmelCase : Optional[Any] = do_lower_case
__UpperCAmelCase : str = remove_space
__UpperCAmelCase : Any = keep_accents
__UpperCAmelCase : Optional[Any] = vocab_file
__UpperCAmelCase : Optional[int] = False if not self.vocab_file else True
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Optional[Any] = [self.sep_token_id]
__UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(__a ):
logger.error("Vocabulary path ({}) should be a directory".format(__a ) )
return
__UpperCAmelCase : Optional[Any] = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
copyfile(self.vocab_file , __a )
return (out_vocab_file,)
| 719 | '''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
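# --- Usage sketch (hedged; assumes the surrounding `datasets` library) ------
# TorchFormatter is normally selected indirectly rather than constructed by hand:
#   ds = ds.with_format("torch")     # rows now come back as torch.Tensors
#   ds[0]["input_ids"].dtype         # torch.int64, via the integer default above
# Direct use would need a pyarrow table: TorchFormatter().format_batch(pa_table).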
| 10 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` is not the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """Rename a file or directory on the given filesystem."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Reset fsspec's async state so it is not reused across forked processes."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
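# --- Worked example (pure string logic, no network access) ------------------
# extract_path_from_uri("s3://my-bucket/dir/train.json") -> "my-bucket/dir/train.json"
# extract_path_from_uri("relative/local/path")           -> "relative/local/path"
# is_remote_filesystem(fs) is False only when fs.protocol == "file" (local disk).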
| 720 | '''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """A color is valid for this vertex if no already-colored neighbour uses it."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` and all vertices after it."""
    # Base Case: every vertex has been assigned a color
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring with at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
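if __name__ == "__main__":
    # Small sanity check (input assumed for illustration): vertices 0-1-2 form
    # a triangle (adjacency matrix below) and vertex 3 is isolated, so three
    # colors suffice.
    demo_graph = [
        [0, 1, 1, 0],
        [1, 0, 1, 0],
        [1, 1, 0, 0],
        [0, 0, 0, 0],
    ]
    print(color(demo_graph, 3))  # [0, 1, 2, 0]; [] would mean no valid coloring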
| 10 | 0 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
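# --- Hedged wiring sketch (actor count and model name are illustrative
# --- assumptions, not part of this file) -------------------------------------
# workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained(
#     "facebook/rag-token-nq", actor_handles=workers
# )
# retriever.init_retrieval()  # builds the index once per worker (or locally)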
| 721 | '''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
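# Worked example (number chosen for illustration): for number = 0b1010 (= 10):
#   set_bit(10, 0)    -> 11   (0b1011)
#   clear_bit(10, 1)  -> 8    (0b1000)
#   flip_bit(10, 3)   -> 2    (0b0010)
#   is_bit_set(10, 1) -> True
#   get_bit(10, 0)    -> 0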
| 10 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    """Build the `accelerate config` argument parser with its subcommands."""
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser


def main():
    """Parse the CLI arguments and dispatch to the selected subcommand."""
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)


if __name__ == "__main__":
    main()
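# CLI sketch (usage inferred from the subcommands registered above):
#   accelerate config           -> interactive configuration questionnaire
#   accelerate config default   -> write a default config file
#   accelerate config update    -> migrate an existing config file in place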
| 700 | '''simple docstring'''
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
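# A quick hedged cross-check against scikit-learn's own estimator (k = 5 assumed):
#   from sklearn.neighbors import KNeighborsClassifier
#   knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
#   classes[knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]]   # usually agrees with classifier(...)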
| 10 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __A :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def _snake_case ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _snake_case ( self ):
        return LlamaConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=False ,
            initializer_range=self.initializer_range ,
        )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = LlamaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCAmelCase : Optional[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
__UpperCAmelCase : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : List[Any] = LlamaModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
__UpperCAmelCase : Dict = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
__UpperCAmelCase : Optional[int] = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
__UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
__UpperCAmelCase : int = True
__UpperCAmelCase : Dict = True
__UpperCAmelCase : int = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
__UpperCAmelCase : int = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , )
__UpperCAmelCase : Dict = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCAmelCase : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCAmelCase : str = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
__UpperCAmelCase : List[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
# select random slice
__UpperCAmelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
    def _snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __A (__snake_case , __snake_case , __snake_case , unittest.TestCase ):
snake_case :List[str] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
snake_case :str = (LlamaForCausalLM,) if is_torch_available() else ()
snake_case :Union[str, Any] = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case :Optional[int] = False
snake_case :Union[str, Any] = False
def _snake_case ( self ):
__UpperCAmelCase : Dict = LlamaModelTester(self )
__UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self ):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Tuple = 3
__UpperCAmelCase : Optional[int] = input_dict["input_ids"]
__UpperCAmelCase : Dict = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCAmelCase : int = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Any = 3
__UpperCAmelCase : Any = "single_label_classification"
__UpperCAmelCase : Tuple = input_dict["input_ids"]
__UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCAmelCase : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCAmelCase : Optional[int] = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : str = "multi_label_classification"
__UpperCAmelCase : Tuple = input_dict["input_ids"]
__UpperCAmelCase : Tuple = input_ids.ne(1 ).to(__UpperCamelCase )
__UpperCAmelCase : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCAmelCase : Any = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
__UpperCAmelCase : int = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def _snake_case ( self ):
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Union[str, Any] = ids_tensor([1, 10] , config.vocab_size )
__UpperCAmelCase : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCAmelCase : str = LlamaModel(__UpperCamelCase )
original_model.to(__UpperCamelCase )
original_model.eval()
__UpperCAmelCase : List[str] = original_model(__UpperCamelCase ).last_hidden_state
__UpperCAmelCase : int = original_model(__UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__UpperCAmelCase : Tuple = {"type": scaling_type, "factor": 1_0.0}
__UpperCAmelCase : Any = LlamaModel(__UpperCamelCase )
scaled_model.to(__UpperCamelCase )
scaled_model.eval()
__UpperCAmelCase : int = scaled_model(__UpperCamelCase ).last_hidden_state
__UpperCAmelCase : List[str] = scaled_model(__UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-5 ) )
@require_torch
class __A (unittest.TestCase ):
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
__UpperCAmelCase : Optional[int] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
__UpperCAmelCase : List[str] = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : Any = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
__UpperCAmelCase : int = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
__UpperCAmelCase : List[Any] = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : Tuple = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def _snake_case ( self ):
__UpperCAmelCase : str = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
__UpperCAmelCase : Optional[int] = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
__UpperCAmelCase : Optional[Any] = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
__UpperCAmelCase : str = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
__UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
__UpperCAmelCase : List[str] = model(torch.tensor(__UpperCamelCase ) )
__UpperCAmelCase : Any = torch.tensor(
            [[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
__UpperCAmelCase : List[Any] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Model is curently gated" )
@slow
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
__UpperCAmelCase : Union[str, Any] = "Simply put, the theory of relativity states that "
__UpperCAmelCase : Tuple = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
__UpperCAmelCase : Dict = tokenizer.encode(__UpperCamelCase , return_tensors="pt" )
__UpperCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=__UpperCamelCase )
# greedy generation outputs
__UpperCAmelCase : int = model.generate(__UpperCamelCase , max_new_tokens=64 , top_p=__UpperCamelCase , temperature=1 , do_sample=__UpperCamelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
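# --- Informal sketch of the RoPE scaling exercised in the test above. The
# --- formulas follow the commonly described linear / dynamic-NTK rules and
# --- are assumptions for illustration, not taken from this test file.
def _linear_scaled_position(p: int, factor: float) -> float:
    # Every position is compressed by the factor, so even short inputs change.
    return p / factor


def _dynamic_ntk_base(base: float, dim: int, seq_len: int, max_pos: int, factor: float) -> float:
    if seq_len <= max_pos:
        return base  # unchanged for short inputs, matching the allclose() branch
    return base * ((factor * seq_len / max_pos) - (factor - 1)) ** (dim / (dim - 2))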
| 701 | '''simple docstring'''
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list of set sizes; every set starts with rank 1."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using the union-by-rank heuristic; return True on success."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path on the way up."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
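if __name__ == "__main__":
    # Demo (inputs assumed for illustration): four sets of sizes 1, 2, 3 and 4;
    # merging sets 0-1 and 2-3 leaves a largest merged set of size 3 + 4 = 7.
    ds = DisjointSet([1, 2, 3, 4])
    ds.merge(0, 1)
    ds.merge(2, 3)
    print(ds.max_set)  # 7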
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Segment tree constructor; works with any commutative combiner `fnc`."""
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Update element `p` in O(log N) time."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Get the combined value over [l, r] in O(log N) time."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every [i, j] query against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
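# Complexity note (informal): build() is O(n); update() and query() are
# O(log n), since each loop iteration halves the index p or the [l, r] window
# in the implicit binary tree stored in self.st.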
| 702 | '''simple docstring'''
def method_1(boundary: list[float], steps: int) -> float:
    """Approximate the definite integral of f over `boundary` with the trapezoidal rule."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    """Yield the interior sample points a + h, a + 2h, ... inside (a, b)."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"""y = {y}""")


if __name__ == "__main__":
    main()
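# Sanity check (values assumed): for f(x) = x**2 on [0, 1] with 10 steps the
# rule above prints roughly y = 0.335, versus the exact integral 1/3; the
# composite trapezoidal error shrinks as O(h**2) as `steps` grows.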
| 10 | 0 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the current session."""
    ua = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"""; torch/{_torch_version}"""
    if is_flax_available():
        ua += f"""; jax/{_jax_version}"""
        ua += f"""; flax/{_flax_version}"""
    if is_onnx_available():
        ua += f"""; onnxruntime/{_onnxruntime_version}"""
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    """Return the full `owner/model_id` repo name on the Hub."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"""{username}/{model_id}"""
    else:
        return f"""{organization}/{model_id}"""
def create_model_card(args, model_name: str) -> None:
    """Render and save a model card (README.md) from the training arguments."""
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`." )
    if hasattr(args , "local_rank" ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , "hub_token" ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) ,
        template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name ,
        dataset_name=args.dataset_name if hasattr(args , "dataset_name" ) else None ,
        learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size ,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , "gradient_accumulation_steps" ) else None
        ) ,
        adam_beta1=args.adam_beta1 if hasattr(args , "adam_beta1" ) else None ,
        adam_beta2=args.adam_beta2 if hasattr(args , "adam_beta2" ) else None ,
        adam_weight_decay=args.adam_weight_decay if hasattr(args , "adam_weight_decay" ) else None ,
        adam_epsilon=args.adam_epsilon if hasattr(args , "adam_epsilon" ) else None ,
        lr_scheduler=args.lr_scheduler if hasattr(args , "lr_scheduler" ) else None ,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args , "lr_warmup_steps" ) else None ,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args , "ema_inv_gamma" ) else None ,
        ema_power=args.ema_power if hasattr(args , "ema_power" ) else None ,
        ema_max_decay=args.ema_max_decay if hasattr(args , "ema_max_decay" ) else None ,
        mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , "README.md" )
    model_card.save(card_path )
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extract the commit hash from a resolved cache filename."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix() )
    search = re.search(R"snapshots/([^/]+)/" , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    """Move cached blobs from the old diffusers cache layout to the new one."""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Insert a variant tag (e.g. "fp16") before the file extension of a weights name."""
    if variant is not None:
        splits = weights_name.split("." )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits )
    return weights_name
def _get_model_file(
    pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    """Resolve a model weights file locally or download it from the Hub."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse("0.20.0" )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , FutureWarning , )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}' so that the correct variant file can be added.""" , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`." )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
                "this model name. Check the model page at "
                f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
        except HTTPError as err:
            raise EnvironmentError(
                f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
        except ValueError:
            raise EnvironmentError(
                f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
                f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
                f""" directory containing a file named {weights_name} or"""
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
        except EnvironmentError:
            raise EnvironmentError(
                f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
                f"""containing a file named {weights_name}""" )
| 703 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = ["ViTFeatureExtractor"]
_a : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

def get_deta_config(model_name):
    """Build the DETA config (Swin backbone + detection head) for a given checkpoint name."""
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
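# e.g. get_deta_config("deta-swin-large-o365") (checkpoint name assumed for
# illustration) yields a config with 366 Objects365 labels, while any other
# name falls back to the 91 COCO detection classes.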
def create_rename_keys(config):
    """List (old_name, new_name) pairs for remapping original DETA weights to HF names."""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
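# Each entry pairs an original checkpoint key with its Transformers name, e.g. the stem mapping
# ("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight").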
def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`, in-place."""
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
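# The original Swin checkpoint stores query/key/value as a single fused `qkv` matrix of shape
# (3 * dim, dim); the three equal slices above split it back into the separate q/k/v projections
# that the HF model expects.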
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    """Load the standard COCO validation image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original DETA weights into our DETA structure.
    """
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
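    # Example invocation (a sketch; the script filename and dump path are assumptions):
    #   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
    #       --pytorch_dump_folder_path /tmp/deta-swin-large --push_to_hub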
| 704 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
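        # Worked example of the offset: "," is piece id 3 in the spm model, so its final id is
        # 3 + self.fairseq_offset == 4, matching the fairseq table above.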
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
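# Minimal round-trip sketch (assumes a real sentencepiece model file on disk; the path is illustrative):
#   tok = XLMRobertaTokenizer("sentencepiece.bpe.model")
#   ids = tok("Hello world")["input_ids"]   # starts with cls_token_id (0), ends with sep_token_id (2)
#   tok.convert_ids_to_tokens(ids)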
| 10 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text=None,
        text_pair=None,
        boxes=None,
        word_labels=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample back to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
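# A minimal usage sketch (assumes locally available checkpoints; `image`, `words` and `boxes` are illustrative):
#   from transformers import LayoutLMv3ImageProcessor, LayoutLMv3TokenizerFast
#   processor = LayoutLMv3Processor(
#       LayoutLMv3ImageProcessor(apply_ocr=False),
#       LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base"),
#   )
#   encoding = processor(images=image, text=words, boxes=boxes, return_tensors="pt")
#   # encoding holds input_ids, bbox, attention_mask and pixel_values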
| 705 | '''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
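# Note: criteria inside a StoppingCriteriaList compose with OR semantics — generation stops as soon as
# any single criterion (max length, max new tokens, wall-clock time, ...) fires, which is exactly what
# the length-5 / length-9 / length-10 assertions above exercise.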
| 10 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = ["OwlViTFeatureExtractor"]
_a : Optional[Any] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 | '''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"
                " so that the fast tokenizer works correctly.")
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        truncate_before_pattern=None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
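# Example of the truncation hook (a sketch; the pattern list is illustrative):
#   tok.decode(ids, truncate_before_pattern=["\n\n^#", "^'''", "\n\n\n"])
# cuts the decoded completion at the first of those regexes — and at a second top-level
# `print`/`def` — which keeps a CodeGen sample to a single function.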
| 10 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 707 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding the whitespace/control characters that the BPE
    code cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
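# For example, byte 0x20 (space) is not in the printable keep-list, so it is remapped to
# chr(256 + 32) == "Ġ" — which is why GPT-2/BART vocab files are full of "Ġ"-prefixed tokens
# marking a leading space.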
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word. The word is represented as a tuple of symbols (symbols being
    variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
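# e.g. get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}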
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 10 | 0 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """Simple text streamer that prints token(s) to stdout as soon as entire words are formed."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text, stream_end=False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode blocks.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):
            return True
        return False
class TextIteratorStreamer(TextStreamer):
    """Streamer that stores print-ready text in a queue, to be consumed by a downstream application as an iterator."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: float = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
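# A minimal consumer sketch (model/tokenizer objects and generation kwargs are illustrative):
#   from threading import Thread
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20)).start()
#   for new_text in streamer:   # yields print-ready chunks until the stop signal arrives
#       print(new_text, end="")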
| 708 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
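# Instantiation sketch (the defaults above describe a small 6-layer, 256-dim decoder):
#   config = Speech2Text2Config()                                   # d_model=256, 6 layers, 4 heads
#   config = Speech2Text2Config(vocab_size=5000, decoder_layers=3)  # an assumed smaller variant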
| 10 | 0 |
'''simple docstring'''
def print_max_activities(start, finish):
    """
    Greedily select a maximum-size set of mutually compatible activities,
    assuming the activities are already sorted by finish time.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
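    # Expected output: activities 0, 1, 3 and 4 — the classic greedy argument: always taking the
    # compatible activity that finishes earliest never rules out a larger selection.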
| 709 | '''simple docstring'''
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum and the sum of the squares
    of the first n natural numbers.
    """
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
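# Both terms have closed forms: sum(1..n) = n(n+1)/2 and sum(i^2, 1..n) = n(n+1)(2n+1)/6,
# so no loop is needed. Worked example: solution(10) = 55**2 - 385 = 3025 - 385 = 2640.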
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the ``_import_structure`` objects defined and the
    ``TYPE_CHECKING`` objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the repo define the same objects in both the ``_import_structure``
    half and the ``TYPE_CHECKING`` half.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of Transformers submodules found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check that all submodules are registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
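    # Editor-added illustration (hedged, not part of the original script): the
    # duplicate-detection core of ``analyze_results`` in isolation.
    def _find_duplicates_demo(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    assert _find_duplicates_demo(["BertModel", "BertConfig", "BertModel"]) == ["BertModel"]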
| 710 | '''simple docstring'''
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Return the present value of ``cash_flows`` discounted at ``discount_rate``,
    rounded to two decimal places. The first cash flow occurs at period 0."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
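    # Editor-added worked example (hedged): a -1000 outlay followed by three
    # 500 inflows at a 10% discount rate.
    # NPV = -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 ≈ 243.43
    print(present_value(0.10, [-1000, 500, 500, 500]))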
| 10 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self, questions, titles=None, texts=None, padding=False, truncation=False, max_length=None,
        return_tensors=None, return_attention_mask=None, **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4):
        """Get the span predictions for the extractive Q&A model, best passages first."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        """Find the best answer spans from the given logits, keeping only non-overlapping spans."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
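# Editor-added standalone sketch (hedged): the span-selection rule used by
# ``_get_best_spans`` above, reduced to plain Python lists and WITHOUT the
# overlap filtering. Names are illustrative, not from the original class.
def _best_spans_demo(start_logits, end_logits, max_answer_length=3, top_spans=1):
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    scores = sorted(scores, key=lambda x: x[1], reverse=True)
    return [span for span, _ in scores[:top_spans]]

# The span (1, 2) maximizes start_logit[1] + end_logit[2] = 4.0 here.
assert _best_spans_demo([0.1, 2.0, 0.1], [0.1, 0.1, 2.0]) == [(1, 2)]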
| 711 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
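# Editor-added minimal sketch (hedged): the seeded-regression pattern used
# above, in isolation, with a deterministic toy function standing in for the
# diffusion model. Fixing the seed makes the random input reproducible, so the
# output can be compared against a stored reference with a small tolerance.
def _toy_model(x):
    return 2.0 * x

torch.manual_seed(0)
_x = torch.randn(4)
torch.manual_seed(0)
_expected = 2.0 * torch.randn(4)  # reference recomputed from the same seed
assert torch.allclose(_toy_model(_x), _expected, atol=1e-3)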
| 10 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
_a : str = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
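# Editor-added usage sketch (hedged): how a PipelineTool like this is
# typically invoked from user code; exact agent-side wiring and download
# behaviour may differ, so this is left as an illustrative comment.
#
#   translator = TranslationTool()
#   print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))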
| 712 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True], cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02,
        layer_norm_eps=1e-12, **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
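# Editor-added sketch (hedged): the list-valued fields above are per-stage
# hyperparameters; zipping them yields one (depth, width, heads) triple per
# CvT stage.
if __name__ == "__main__":
    config = CvtConfig()
    for stage, (d, dim, heads) in enumerate(zip(config.depth, config.embed_dim, config.num_heads)):
        print(f"stage {stage}: depth={d}, embed_dim={dim}, num_heads={heads}")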
| 10 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : Tuple = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 713 | '''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve ``coefficient_matrix @ x = constant_matrix`` by Jacobi iteration,
    starting from ``init_val`` and running ``iterations`` sweeps."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if any diagonal entry of the augmented matrix fails to dominate its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
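    # Editor-added worked example (hedged): a strictly diagonally dominant
    # 2x2 system; 50 Jacobi sweeps converge to x ≈ [1, 2], since
    # 4*1 + 1*2 = 6 and 1*1 + 3*2 = 7.
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[6.0], [7.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], 50))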
| 10 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : List[str] = logging.get_logger(__name__)
_a : Dict = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class __A (UpperCamelCase__ , UpperCamelCase__ ):
snake_case :Any = "focalnet"
def __init__( self , UpperCamelCase_=2_24 , UpperCamelCase_=4 , UpperCamelCase_=3 , UpperCamelCase_=96 , UpperCamelCase_=False , UpperCamelCase_=[1_92, 3_84, 7_68, 7_68] , UpperCamelCase_=[2, 2, 6, 2] , UpperCamelCase_=[2, 2, 2, 2] , UpperCamelCase_=[3, 3, 3, 3] , UpperCamelCase_="gelu" , UpperCamelCase_=4.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_=False , UpperCamelCase_=1E-4 , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-5 , UpperCamelCase_=32 , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ):
super().__init__(**__A )
__UpperCAmelCase : Dict = image_size
__UpperCAmelCase : int = patch_size
__UpperCAmelCase : str = num_channels
__UpperCAmelCase : Dict = embed_dim
__UpperCAmelCase : Union[str, Any] = use_conv_embed
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : List[Any] = focal_levels
__UpperCAmelCase : Dict = focal_windows
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = drop_path_rate
__UpperCAmelCase : str = use_layerscale
__UpperCAmelCase : Optional[Any] = layerscale_value
__UpperCAmelCase : Any = use_post_layernorm
__UpperCAmelCase : Dict = use_post_layernorm_in_modulation
__UpperCAmelCase : Dict = normalize_modulator
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Optional[Any] = layer_norm_eps
__UpperCAmelCase : List[Any] = encoder_stride
__UpperCAmelCase : Any = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__UpperCAmelCase , __UpperCAmelCase : Tuple = get_aligned_output_features_output_indices(
out_features=__A , out_indices=__A , stage_names=self.stage_names )
| 714 | '''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Möbius function: 0 if ``n`` has a squared prime factor, otherwise
    (-1) raised to the number of distinct prime factors of ``n``."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
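    # Editor-added spot checks (hedged, assuming prime_factors/is_square_free
    # behave as their import paths suggest): μ(1) = 1 (zero prime factors),
    # μ(6) = 1 (two distinct primes), μ(30) = -1 (three), μ(12) = 0 (2**2 | 12).
    assert [mobius(n) for n in (1, 6, 30, 12)] == [1, 1, -1, 0]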
| 10 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."], )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = [
"<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
__UpperCAmelCase : Optional[int] = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name="AI-Sweden/gpt-sw3-126m" , sequences=__SCREAMING_SNAKE_CASE , )
| 715 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
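# Editor-added sketch (hedged): the deferred-import idea behind _LazyModule in
# miniature — attribute access triggers the real submodule import on first use.
# This is an illustration only, not the actual _LazyModule implementation.
#
#   import importlib, types
#
#   class _LazyDemo(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._modules = {obj: mod for mod, objs in import_structure.items() for obj in objs}
#
#       def __getattr__(self, name):
#           module = importlib.import_module("." + self._modules[name], self.__name__)
#           return getattr(module, name)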
| 10 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True,
        size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True,
        rescale_factor=1 / 255, do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
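
# Hedged usage sketch (not part of the test suite): the calls below mirror the
# preprocessing pipeline exercised by the tests above; shapes depend on the input.
#
#   processor = DetaImageProcessor(size={"shortest_edge": 800, "longest_edge": 1333})
#   encoding = processor(images=image, return_tensors="pt")
#   encoding["pixel_values"].shape  # -> torch.Size([1, 3, H, W])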
| 716 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
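
# Hedged usage sketch (illustrative values): building a config with integer-only
# quantization enabled and a selected op forced back to floating point.
#
#   config = IBertConfig(quant_mode=True, force_dequant="gelu")
#   config.num_hidden_layers  # -> 12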
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self, rows):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable(self):
return bool(self.determinant() )
    def get_minor(self, row, column):
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )
    def cofactors(self):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate(self):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
def __repr__( self ):
return str(self.rows )
def __str__( self ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(__A ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row(self, row, position=None):
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError("Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        type_error = TypeError("Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError("Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other
def __neg__( self ):
return self * -1
    def __add__(self, other):
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__(self, other):
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__(self, other):
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )
    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result
@classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
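
    # Small demonstration of the Matrix API above (values chosen for illustration):
    example = Matrix([[1, 2], [3, 4]])
    assert example.determinant() == -2
    assert example * example.identity() == example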
| 717 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
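# Example invocation (flag names follow TensorFlowBenchmarkArguments; adjust to taste):
#
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128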
| 10 | 0 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
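# SORTED_HANDS is ordered weakest to strongest, so generate_random_hand() below can
# derive the expected "Loss"/"Tie"/"Win" outcome directly from the two indices drawn.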
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    """Pick two sorted hands at random and compute the expected comparison result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Yield `number_of_hands` random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Repeated calls must keep returning True without mutating the card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 from Project Euler, tested against the poker_hands.txt file.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 718 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
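# For example, check_same_shape([torch.zeros(2, 3), torch.ones(2, 3)]) is True;
# it is used below to assert every Karras scheduler yields outputs of identical shape.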
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        # Note: the None/False literals below are reconstructed defaults for this tiny
        # K-diffusion upscaler UNet; the remaining values come from the original test.
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
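
# Hedged usage sketch of the two-stage flow exercised above: generate latents with
# the base pipeline, then upscale them 2x with the latent upscaler.
#
#   low_res_latents = pipe(prompt, output_type="latent").images
#   image = upscaler(prompt=prompt, image=low_res_latents, output_type="np").images[0]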
| 10 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5)
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
    def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
    def test_inputs_embeds(self):
pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="SwiftFormer does not output attentions" )
    def test_attention_outputs(self):
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self):
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
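
# Hedged usage sketch mirroring the integration test above (checkpoint name as used
# in the test; outputs are ImageNet-1k logits):
#
#   processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
#   model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
#   logits = model(**processor(images=img, return_tensors="pt")).logits  # (1, 1000)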
| 719 | '''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column
    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
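
# Hedged usage sketch (assumes 🤗 Datasets is installed): with_format("torch") routes
# row/column/batch formatting through the TorchFormatter above.
#
#   ds = load_dataset("imdb", split="train").with_format("torch")
#   ds[0]["label"]  # -> torch.Tensor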
| 10 | 0 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
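
# Example: glue_compute_metrics("mrpc", preds, labels) returns
# {"acc": ..., "f1": ..., "acc_and_f1": ...} by delegating to acc_and_f1 above.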
| 720 | '''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """
    Check the coloring constraint: the assignment is invalid if any neighbour
    is already assigned `color`.
    """
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """
    Recursively try every color in [0, max_colors) for vertex `index`,
    backtracking when no valid color exists.
    """
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """
    Return a valid coloring (one color index per vertex), or an empty list
    when the graph cannot be colored with `max_colors` colors.
    """
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
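

if __name__ == "__main__":
    # A triangle needs 3 colors; with only 2 the solver backtracks to failure.
    triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    print(color(triangle, 3))  # [0, 1, 2]
    print(color(triangle, 2))  # []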
| 10 | 0 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
| 721 | '''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
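# Usage sketch for the bit-twiddling helpers above, restated under
# conventional names (set_bit / clear_bit / flip_bit are our assumptions;
# the snippet defines every helper under the same mangled name).
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)

def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)

def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)

assert set_bit(0b1101, 1) == 0b1111    # 13 -> 15
assert clear_bit(0b1101, 2) == 0b1001  # 13 -> 9
assert flip_bit(0b1101, 0) == 0b1100   # 13 -> 12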
| 10 | 0 |
'''simple docstring'''
import math
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Tuple = 2
__UpperCAmelCase : int = int(math.sqrt(lowerCamelCase__ ) ) # Size of every segment
__UpperCAmelCase : str = [True] * (end + 1)
__UpperCAmelCase : int = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCamelCase__ )
for i in range(start * start , end + 1 , lowerCamelCase__ ):
__UpperCAmelCase : List[str] = False
start += 1
prime += in_prime
__UpperCAmelCase : Optional[int] = end + 1
__UpperCAmelCase : str = min(2 * end , lowerCamelCase__ )
while low <= n:
__UpperCAmelCase : int = [True] * (high - low + 1)
for each in in_prime:
__UpperCAmelCase : int = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCamelCase__ , high + 1 , lowerCamelCase__ ):
__UpperCAmelCase : int = False
for j in range(len(lowerCamelCase__ ) ):
if temp[j] is True:
prime.append(j + low )
__UpperCAmelCase : str = high + 1
__UpperCAmelCase : List[Any] = min(high + end , lowerCamelCase__ )
return prime
print(sieve(10**6))
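# Cross-check of the segmented sieve against a plain Sieve of Eratosthenes
# for a small bound (`simple_sieve` is our own reference implementation,
# not part of the snippet).
def simple_sieve(n: int) -> list:
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n**0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]

assert simple_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]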
| 700 | '''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
_a : str = datasets.load_iris()
_a : List[Any] = np.array(data["data"])
_a : Optional[Any] = np.array(data["target"])
_a : Dict = data["target_names"]
_a , _a , _a , _a : Any = train_test_split(X, y)
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ) -> int:
"""simple docstring"""
__UpperCAmelCase : List[Any] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
__UpperCAmelCase : int = []
for data_point in data:
__UpperCAmelCase : Optional[Any] = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__UpperCAmelCase : Union[str, Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__UpperCAmelCase : Dict = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
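# Self-contained illustration of the same k-nearest-neighbour vote on a toy
# 2-D dataset (the helper names here are ours, not the snippet's mangled ones).
from collections import Counter
import numpy as np

train = [((1.0, 1.0), 0), ((1.2, 0.8), 0), ((5.0, 5.0), 1), ((5.2, 4.9), 1)]
query = (4.8, 5.1)
k = 3
nearest = sorted(
    train, key=lambda item: np.linalg.norm(np.array(item[0]) - np.array(query))
)[:k]
predicted = Counter(label for _, label in nearest).most_common(1)[0][0]
assert predicted == 1  # the query point sits in the class-1 cluster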
| 10 | 0 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __A (_UpperCAmelCase ):
def __init__( self , *UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ):
super().__init__(*lowercase__ , **lowercase__ )
__UpperCAmelCase : List[str] = eval_examples
__UpperCAmelCase : int = post_process_function
def _snake_case ( self , UpperCamelCase_ = None , UpperCamelCase_=None , UpperCamelCase_ = None , UpperCamelCase_ = "eval" , **UpperCamelCase_ , ):
__UpperCAmelCase : Any = gen_kwargs.copy()
__UpperCAmelCase : List[str] = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
__UpperCAmelCase : Dict = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
__UpperCAmelCase : Optional[int] = gen_kwargs
__UpperCAmelCase : Union[str, Any] = self.eval_dataset if eval_dataset is None else eval_dataset
__UpperCAmelCase : List[str] = self.get_eval_dataloader(lowercase__ )
__UpperCAmelCase : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCAmelCase : int = self.compute_metrics
__UpperCAmelCase : Optional[Any] = None
__UpperCAmelCase : List[str] = time.time()
__UpperCAmelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__UpperCAmelCase : str = eval_loop(
lowercase__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , metric_key_prefix=lowercase__ , )
finally:
__UpperCAmelCase : List[str] = compute_metrics
__UpperCAmelCase : Any = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
lowercase__ , lowercase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__UpperCAmelCase : Any = self.post_process_function(lowercase__ , lowercase__ , lowercase__ )
__UpperCAmelCase : str = self.compute_metrics(lowercase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__UpperCAmelCase : Tuple = metrics.pop(lowercase__ )
metrics.update(output.metrics )
else:
__UpperCAmelCase : Tuple = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__UpperCAmelCase : Optional[int] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase__ )
return metrics
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_ = "test" , **UpperCamelCase_ ):
__UpperCAmelCase : Dict = gen_kwargs.copy()
__UpperCAmelCase : List[str] = self.get_test_dataloader(lowercase__ )
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCAmelCase : List[str] = self.compute_metrics
__UpperCAmelCase : Optional[Any] = None
__UpperCAmelCase : Dict = time.time()
__UpperCAmelCase : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__UpperCAmelCase : Union[str, Any] = eval_loop(
lowercase__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , metric_key_prefix=lowercase__ , )
finally:
__UpperCAmelCase : str = compute_metrics
__UpperCAmelCase : Tuple = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
lowercase__ , lowercase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__UpperCAmelCase : List[Any] = self.post_process_function(lowercase__ , lowercase__ , lowercase__ , "predict" )
__UpperCAmelCase : Dict = self.compute_metrics(lowercase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
__UpperCAmelCase : int = metrics.pop(lowercase__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase__ )
| 701 | '''simple docstring'''
class __A :
def __init__( self , UpperCamelCase_ ):
__UpperCAmelCase : Any = set_counts
__UpperCAmelCase : int = max(UpperCamelCase_ )
__UpperCAmelCase : List[str] = len(UpperCamelCase_ )
__UpperCAmelCase : Any = [1] * num_sets
__UpperCAmelCase : Any = list(range(UpperCamelCase_ ) )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Optional[int] = self.get_parent(UpperCamelCase_ )
__UpperCAmelCase : List[Any] = self.get_parent(UpperCamelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : List[Any] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__UpperCAmelCase : Union[str, Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : Dict = src_parent
__UpperCAmelCase : Dict = self.set_counts[src_parent]
__UpperCAmelCase : Dict = max(self.max_set , UpperCamelCase_ )
return True
def _snake_case ( self , UpperCamelCase_ ):
if self.parents[disj_set] == disj_set:
return disj_set
__UpperCAmelCase : str = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
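# Usage sketch for the structure above: it is a union-find keyed by per-set
# element counts, tracking the largest set. The snippet's two methods share
# one mangled name, so the same union-by-rank logic is restated here under
# conventional names.
class DisjointSet:
    def __init__(self, set_counts):
        self.set_counts = list(set_counts)
        self.max_set = max(set_counts)
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))

    def get_parent(self, i: int) -> int:
        while self.parents[i] != i:
            i = self.parents[i]
        return i

    def merge(self, src: int, dst: int) -> bool:
        src_p, dst_p = self.get_parent(src), self.get_parent(dst)
        if src_p == dst_p:
            return False
        # union by rank: the higher-rank root absorbs the other
        winner, loser = (dst_p, src_p) if self.ranks[dst_p] >= self.ranks[src_p] else (src_p, dst_p)
        if self.ranks[winner] == self.ranks[loser]:
            self.ranks[winner] += 1
        self.set_counts[winner] += self.set_counts[loser]
        self.set_counts[loser] = 0
        self.parents[loser] = winner
        self.max_set = max(self.max_set, self.set_counts[winner])
        return True

ds = DisjointSet([1, 1, 1])
ds.merge(0, 1)
ds.merge(1, 2)
assert ds.max_set == 3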
| 10 | 0 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError("String lengths must match!" )
__UpperCAmelCase : Tuple = 0
for chara, chara in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
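# Tiny check of the Hamming-distance helper above, restated under a
# conventional name (the snippet's definition is name-mangled).
def hamming_distance(a: str, b: str) -> int:
    if len(a) != len(b):
        raise ValueError("String lengths must match!")
    return sum(ca != cb for ca, cb in zip(a, b))

assert hamming_distance("karolin", "kathrin") == 3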
| 702 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : Dict = (boundary[1] - boundary[0]) / steps
__UpperCAmelCase : Tuple = boundary[0]
__UpperCAmelCase : List[str] = boundary[1]
__UpperCAmelCase : List[Any] = make_points(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__UpperCAmelCase : int = 0.0
y += (h / 2.0) * f(lowerCamelCase__ )
for i in x_i:
y += h * f(lowerCamelCase__ )
y += (h / 2.0) * f(lowerCamelCase__ )
return y
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = a + h
while x < (b - h):
yield x
__UpperCAmelCase : List[str] = x + h
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]: # enter your function here
"""simple docstring"""
__UpperCAmelCase : str = (x - 0) * (x - 0)
return y
def _lowercase ( ) -> int:
"""simple docstring"""
__UpperCAmelCase : Tuple = 0.0 # Lower bound of integration
__UpperCAmelCase : Union[str, Any] = 1.0 # Upper bound of integration
__UpperCAmelCase : Union[str, Any] = 10.0 # define number of steps or resolution
__UpperCAmelCase : Dict = [a, b] # define boundary of integration
__UpperCAmelCase : Optional[int] = method_a(lowerCamelCase__ , lowerCamelCase__ )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
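# Worked check of the composite trapezoidal rule above: for f(x) = x**2 on
# [0, 1] the exact integral is 1/3, and with 1000 steps the error falls
# below 1e-6 (the theoretical bound is (b - a) * h**2 / 12 * max|f''|).
def trapezoid(f, a: float, b: float, steps: int) -> float:
    h = (b - a) / steps
    total = (f(a) + f(b)) / 2.0
    for i in range(1, steps):
        total += f(a + i * h)
    return total * h

approx = trapezoid(lambda x: x * x, 0.0, 1.0, 1000)
assert abs(approx - 1.0 / 3.0) < 1e-6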
| 10 | 0 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
_a : Optional[Any] = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(__lowercase ) , version.parse(__lowercase ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ = None ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : int = f"""\n{hint}""" if hint is not None else ''
# non-versioned check
if re.match(R"^[\w_\-\d]+$" , __lowercase ):
__UpperCAmelCase : List[str] = requirement, None, None
else:
__UpperCAmelCase : Optional[int] = re.findall(R"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , __lowercase )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
f""" got {requirement}""" )
__UpperCAmelCase : Tuple = match[0]
__UpperCAmelCase : Optional[Any] = want_full.split("," ) # there could be multiple requirements
__UpperCAmelCase : Optional[int] = {}
for w in want_range:
__UpperCAmelCase : int = re.findall(R"^([\s!=<>]{1,2})(.+)" , __lowercase )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
f""" but got {requirement}""" )
__UpperCAmelCase : Any = match[0]
__UpperCAmelCase : int = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
__UpperCAmelCase : Union[str, Any] = '.'.join([str(__lowercase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
return
# check if any version is installed
try:
__UpperCAmelCase : Dict = importlib.metadata.version(__lowercase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The \'{requirement}\' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase : int = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(__lowercase , __lowercase )
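# The comparison at the heart of the checker above, in isolation: parse both
# version strings with `packaging.version` and dispatch through the operator
# table. The version numbers below are illustrative.
import operator
from packaging import version

ops = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
       "!=": operator.ne, ">=": operator.ge, ">": operator.gt}

got_ver, op, want_ver = "4.30.2", ">=", "4.0.0"
assert ops[op](version.parse(got_ver), version.parse(want_ver))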
| 703 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = ["ViTFeatureExtractor"]
_a : Dict = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
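# The boilerplate above defers the heavy torch/TF/flax imports until an
# attribute is first accessed. A stripped-down sketch of the same idea,
# driven by the {submodule: [names]} structure built above (this is not the
# actual `_LazyModule` implementation):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        submodule = importlib.import_module(
            "." + self._name_to_module[attr], self.__name__
        )
        return getattr(submodule, attr)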
| 10 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : int = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
_a : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : str = logging.get_logger(__name__)
_a : Tuple = "▁"
_a : Optional[int] = {"vocab_file": "sentencepiece.bpe.model"}
_a : Tuple = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
_a : Optional[Any] = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = VOCAB_FILES_NAMES
snake_case :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCAmelCase : Optional[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset
__UpperCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
__UpperCAmelCase : List[str] = self.__dict__.copy()
__UpperCAmelCase : str = None
__UpperCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Tuple = {}
__UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Dict = [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , UpperCamelCase_ ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : Optional[int] = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , UpperCamelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Tuple = "".join(UpperCamelCase_ ).replace(UpperCamelCase_ , " " ).strip()
return out_string
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : List[str] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
__UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
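# The fairseq/SentencePiece alignment above reduces to a fixed 4-token prefix
# plus an offset of 1 on every SentencePiece id. A toy illustration (the ids
# follow the comment table in __init__; no real model is loaded):
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def spm_id_to_fairseq(spm_id: int, unk_id: int = 3) -> int:
    # SentencePiece reserves id 0 for <unk>; map it onto fairseq's unk id
    return spm_id + fairseq_offset if spm_id else unk_id

assert spm_id_to_fairseq(3) == 4  # spm "," (id 3) -> fairseq id 4
assert spm_id_to_fairseq(0) == 3  # spm <unk> -> fairseq <unk>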
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
_a : Optional[Any] = '''Muhammad Umer Farooq'''
_a : List[Any] = '''MIT'''
_a : List[str] = '''1.0.0'''
_a : Union[str, Any] = '''Muhammad Umer Farooq'''
_a : Any = '''[email protected]'''
_a : Optional[Any] = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class __A (__a ):
def __init__( self , UpperCamelCase_ ):
super().__init__()
__UpperCAmelCase : str = []
__UpperCAmelCase : List[Any] = domain
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined and is neither empty nor just "#", process it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__UpperCAmelCase : Optional[int] = parse.urljoin(self.domain , snake_case__ )
self.urls.append(snake_case__ )
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
return ".".join(get_sub_domain_name(lowerCAmelCase__ ).split("." )[-2:] )
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return parse.urlparse(lowerCAmelCase__ ).netloc
def _lowercase ( lowerCamelCase__ = "https://github.com" ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : str = get_domain_name(lowerCAmelCase__ )
# Initialize the parser
__UpperCAmelCase : Tuple = Parser(lowerCAmelCase__ )
try:
# Open URL
__UpperCAmelCase : int = requests.get(lowerCAmelCase__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__UpperCAmelCase : List[Any] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__UpperCAmelCase : Optional[Any] = requests.get(lowerCAmelCase__ )
# Get the valid email.
__UpperCAmelCase : Union[str, Any] = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(lowerCAmelCase__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(lowerCAmelCase__ )
if __name__ == "__main__":
_a : Optional[Any] = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 705 | '''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __A (unittest.TestCase ):
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : Tuple = 2_50
__UpperCAmelCase : str = ids_tensor((batch_size, length) , UpperCamelCase_ )
__UpperCAmelCase : Any = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length
return input_ids, scores
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 )
__UpperCAmelCase : Tuple = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : int = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : int = MaxLengthCriteria(max_length=10 )
__UpperCAmelCase , __UpperCAmelCase : Tuple = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__UpperCAmelCase , __UpperCAmelCase : List[str] = self._get_tensors(5 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def _snake_case ( self ):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self._get_tensors(5 )
__UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
__UpperCAmelCase : str = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) )
def _snake_case ( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(UpperCamelCase_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__UpperCAmelCase : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
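# Direct (non-test) usage of the same criteria: a StoppingCriteriaList fires
# once any member criterion is satisfied. Assumes torch is installed and a
# transformers version in which the list's __call__ returns a plain bool, as
# the assertTrue/assertFalse checks above imply.
import torch
from transformers.generation import MaxLengthCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
input_ids = torch.zeros((3, 10), dtype=torch.long)
scores = torch.ones((3, 10))
assert criteria(input_ids, scores)  # sequence length has reached max_length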
| 10 | 0 |
def _lowercase ( lowerCamelCase__ = 5000_0000 ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase : int = set()
__UpperCAmelCase : List[str] = int((limit - 24) ** (1 / 2) )
__UpperCAmelCase : Any = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowerCAmelCase_ ) ) )
for primea in primes:
__UpperCAmelCase : Union[str, Any] = primea * primea
for primea in primes:
__UpperCAmelCase : Dict = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
__UpperCAmelCase : int = primea * primea * primea * primea
__UpperCAmelCase : int = square + cube + tetr
if total >= limit:
break
ret.add(lowerCAmelCase_ )
return len(lowerCAmelCase_ )
if __name__ == "__main__":
print(f"""{solution() = }""")
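# This counts "prime power triples": numbers below the limit expressible as
# p**2 + q**3 + r**4 with p, q, r prime. Worked small case: below 50 there
# are exactly four such numbers, the smallest being 28 = 2**2 + 2**3 + 2**4.
primes = [2, 3, 5]
found = sorted(
    {p * p + q**3 + r**4
     for p in primes for q in primes for r in primes
     if p * p + q**3 + r**4 < 50}
)
assert found == [28, 33, 47, 49]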
| 706 | '''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_a : Tuple = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
_a : Dict = {
"Salesforce/codegen-350M-mono": 2048,
}
class __A (__magic_name__ ):
snake_case :Optional[Any] = VOCAB_FILES_NAMES
snake_case :str = PRETRAINED_VOCAB_FILES_MAP
snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Tuple = ["input_ids", "attention_mask"]
snake_case :Dict = CodeGenTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop("add_bos_token" , UpperCamelCase_ ):
__UpperCAmelCase : int = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space:
__UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) )
__UpperCAmelCase : Optional[int] = add_prefix_space
__UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = add_prefix_space
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : str = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
__UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
__UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : List[Any] = completion[: prints[1].start()]
__UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()]
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Dict = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
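# Standalone sketch of the truncation idea above: cut a completion at the
# earliest match of any "stop" regex, searching past position 0 so the
# completion's own leading `def` is not treated as a stop point.
import re

completion = "def add(a, b):\n    return a + b\n\n# comment\ndef sub(a, b):"
patterns = [re.compile(r"^# ", re.MULTILINE), re.compile(r"^def ", re.MULTILINE)]
cut_points = [m.start() for p in patterns if (m := p.search(completion, 1))]
truncated = completion[: min(cut_points)] if cut_points else completion
assert truncated == "def add(a, b):\n    return a + b\n\n"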
| 10 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Dict = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "open-llama"
def __init__( self , UpperCamelCase_=10_00_00 , UpperCamelCase_=40_96 , UpperCamelCase_=1_10_08 , UpperCamelCase_=32 , UpperCamelCase_=32 , UpperCamelCase_="silu" , UpperCamelCase_=20_48 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-6 , UpperCamelCase_=True , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : str = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : Optional[int] = rms_norm_eps
__UpperCAmelCase : List[Any] = use_cache
__UpperCAmelCase : int = kwargs.pop(
"use_memorry_efficient_attention" , UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : str = attention_dropout_prob
__UpperCAmelCase : Union[str, Any] = use_stable_embedding
__UpperCAmelCase : List[str] = shared_input_output_embedding
__UpperCAmelCase : Tuple = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , tie_word_embeddings=UpperCamelCase_ , **UpperCamelCase_ , )
def _snake_case ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , UpperCamelCase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f"""got {self.rope_scaling}""" )
__UpperCAmelCase : List[Any] = self.rope_scaling.get("type" , UpperCamelCase_ )
__UpperCAmelCase : Dict = self.rope_scaling.get("factor" , UpperCamelCase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}""" )
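# Standalone restatement of the rope_scaling contract enforced above: a
# two-field dict whose type is "linear" or "dynamic" and whose factor is a
# float strictly greater than 1.
def validate_rope_scaling(rope_scaling) -> None:
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError("`rope_scaling` must be a dict with `type` and `factor`")
    if rope_scaling.get("type") not in ("linear", "dynamic"):
        raise ValueError("`type` must be 'linear' or 'dynamic'")
    factor = rope_scaling.get("factor")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError("`factor` must be a float > 1")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # accepted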
| 707 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
_a : Tuple = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
_a : List[Any] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__UpperCAmelCase : Optional[Any] = bs[:]
__UpperCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Dict = [chr(lowerCamelCase__ ) for n in cs]
return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : Dict = set()
__UpperCAmelCase : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Optional[Any] = char
return pairs
class __A (__magic_name__ ):
snake_case :Optional[int] = VOCAB_FILES_NAMES
snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
__UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
__UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : int = json.load(UpperCamelCase_ )
__UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Any = errors # how to handle errors in decoding
__UpperCAmelCase : str = bytes_to_unicode()
__UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[str] = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = 0
while i < len(UpperCamelCase_ ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : str = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__UpperCAmelCase : int = get_pairs(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ )
__UpperCAmelCase : Dict = word
return word
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = []
for token in re.findall(self.pat , UpperCamelCase_ ):
__UpperCAmelCase : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) )
return bpe_tokens
def _snake_case ( self , UpperCamelCase_ ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , UpperCamelCase_ ):
return self.decoder.get(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = "".join(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : Any = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" )
__UpperCAmelCase : str = 0
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : str = token_index
writer.write(" ".join(UpperCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ):
__UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Tuple = " " + text
return (text, kwargs)
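# Two small helpers carry the byte-level BPE above: a reversible byte ->
# printable-character map and an adjacent-pair extractor. Quick standalone
# illustration of both (restated under our own names):
def get_adjacent_pairs(word: tuple) -> set:
    return {(a, b) for a, b in zip(word, word[1:])}

assert get_adjacent_pairs(("h", "e", "l", "l", "o")) == {
    ("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")
}

# bytes_to_unicode maps every byte to a printable character so merges can be
# learned over plain text: printable ASCII maps to itself, while bytes below
# "!" are shifted up by 256 (e.g. the space byte 32 becomes chr(288), "Ġ").
assert chr(256 + 32) == "Ġ"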
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=12 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=0.0_2 , UpperCamelCase_=0 , UpperCamelCase_=None , ):
__UpperCAmelCase : str = parent
__UpperCAmelCase : int = batch_size
__UpperCAmelCase : Dict = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : Optional[Any] = use_input_mask
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : Optional[Any] = projection_dim
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : int = dropout
__UpperCAmelCase : Union[str, Any] = attention_dropout
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : Union[str, Any] = bos_token_id
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Optional[int] = None
if self.use_input_mask:
__UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
__UpperCAmelCase : int = input_mask.numpy()
__UpperCAmelCase , __UpperCAmelCase : Dict = input_mask.shape
__UpperCAmelCase : Optional[Any] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__UpperCAmelCase : Union[str, Any] = 1
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = self.get_config()
return config, input_ids, tf.convert_to_tensor(_SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = TFBlipTextModel(config=_SCREAMING_SNAKE_CASE )
__UpperCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
__UpperCAmelCase : str = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self ):
__UpperCAmelCase : Any = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __A (lowerCAmelCase__ , unittest.TestCase ):
snake_case :List[str] = (TFBlipTextModel,) if is_tf_available() else ()
snake_case :str = False
snake_case :Tuple = False
snake_case :Optional[Any] = False
def _snake_case ( self ):
__UpperCAmelCase : Dict = BlipTextModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
pass
def _snake_case ( self ):
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def _snake_case ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def _snake_case ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def _snake_case ( self ):
pass
@slow
def _snake_case ( self ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = TFBlipTextModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , UpperCamelCase_=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_SCREAMING_SNAKE_CASE )
| 708 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : int = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __A (__magic_name__ ):
snake_case :Optional[int] = "speech_to_text_2"
snake_case :List[Any] = ["past_key_values"]
snake_case :str = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
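# Minimal usage sketch (hypothetical values; the class name is kept as it
# appears in this file). attribute_map aliases the generic names onto the
# decoder-specific fields:
# config = __A(vocab_size=5000, decoder_layers=4, d_model=128)
# config.hidden_size  -> 128 (resolved through attribute_map to d_model)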
| 10 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __A (metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
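# The classes below repeat the same guard pattern: instantiating a placeholder,
# or calling either classmethod on it, fails fast with a message naming the
# missing optional backends (torch, transformers, onnx).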
class __A (metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __A (metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __A (metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __A (metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class __A (metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _snake_case ( cls , *UpperCamelCase_ , **UpperCamelCase_ ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 709 | '''simple docstring'''
def solution( n = 100 ) -> int:
    """simple docstring"""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
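# Sanity checks against the known Project Euler #6 values:
# solution(10) == 2640 and solution(100) == 25164150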
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | 0 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results( lowerCamelCase__ ) -> Optional[int]:
    """simple docstring"""
    results = {}
    path = os.path.join(lowerCamelCase__ , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"""can't find {path}""" )
return results
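# Usage sketch (path is hypothetical): get_results("/tmp/run") returns the
# parsed contents of /tmp/run/all_results.json as a dict.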
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __A (TestCasePlus ):
def _snake_case ( self ):
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys , "argv" , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00 )
def _snake_case ( self ):
import xla_spawn
        testargs = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(sys , "argv" , testargs ):
xla_spawn.main()
| 710 | '''simple docstring'''
def net_present_value( discount_rate , cash_flows ) -> float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative" )
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
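# Worked example (hypothetical figures): a 1000 outflow at t=0 followed by
# three 500 inflows, discounted at 10%, comes out to roughly 243.43:
# net_present_value(0.10, [-1000, 500, 500, 500]) == 243.43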
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/rembert": 256,
}
SPIECE_UNDERLINE = "▁"
class __A (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
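    # Shape sketch (token ids are placeholders): a single sequence [5, 6] becomes
    # [CLS] 5 6 [SEP]; a pair ([5, 6], [7, 8]) becomes [CLS] 5 6 [SEP] 7 8 [SEP].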
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 711 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"""Started running {mod.modelId}!!!""")
        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"""{mod.modelId} has passed successfully!!!""")
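# Note: the expected-output lookup assumes `results` maps keys derived from the
# modelId (e.g. "google_ddpm_cifar10_32" for "google/ddpm-cifar10-32") to the
# reference tensors defined above; those key assignments are not shown here.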
| 10 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj :
    def __init__( self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__" ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule :
    _active_patches = []
    def __init__( self , obj , target , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split("." )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ):
        *submodules, target_attr = self.target.split("." )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module(".".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" )
    def __exit__( self , *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start( self ):
        self.__enter__()
        self._active_patches.append(self )
    def stop( self ):
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
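# Usage sketch (module and replacement are hypothetical): while the block is
# active, `some_module` sees the patched os.path.join; on exit the original
# attributes are restored.
# with patch_submodule(some_module, "os.path.join", custom_join):
#     some_module.function_that_joins_paths()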
| 712 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (PretrainedConfig ):
    model_type = "cvt"
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1e-12 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
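# Sketch: each three-element list sets one value per CvT stage, so a smaller
# hypothetical variant could be __A(depth=[1, 1, 4], embed_dim=[32, 96, 192]).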
| 10 | 0 |
'''simple docstring'''
from manim import *
class __A (Scene ):
'''simple docstring'''
    def construct( self ):
__UpperCAmelCase : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
__UpperCAmelCase : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__UpperCAmelCase : Optional[int] = Rectangle(height=0.2_5 , width=0.2_5 )
__UpperCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
__UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
__UpperCAmelCase : Any = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__UpperCAmelCase : int = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__UpperCAmelCase : Union[str, Any] = VGroup(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__UpperCAmelCase : List[Any] = Text("CPU" , font_size=24 )
__UpperCAmelCase : List[str] = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase_ )
__UpperCAmelCase : Any = [mem.copy() for i in range(4 )]
__UpperCAmelCase : Optional[Any] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__UpperCAmelCase : Tuple = Text("GPU" , font_size=24 )
__UpperCAmelCase : List[Any] = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
__UpperCAmelCase : Union[str, Any] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__UpperCAmelCase : Tuple = Text("Model" , font_size=24 )
__UpperCAmelCase : List[Any] = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCamelCase_ )
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : str = []
for i, rect in enumerate(UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCamelCase_ , opacity=0.8 )
target.move_to(UpperCamelCase_ )
model_arr.append(UpperCamelCase_ )
__UpperCAmelCase : List[str] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCamelCase_ )
self.add(*UpperCamelCase_ , *UpperCamelCase_ )
__UpperCAmelCase : List[Any] = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase : List[Any] = [meta_mem.copy() for i in range(6 )]
__UpperCAmelCase : List[Any] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__UpperCAmelCase : Any = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__UpperCAmelCase : List[Any] = VGroup(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 )
__UpperCAmelCase : Any = Text("Disk" , font_size=24 )
__UpperCAmelCase : Union[str, Any] = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ )
disk.move_to([-4, -1.2_5, 0] )
self.add(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Dict = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCAmelCase : int = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Any = MarkupText(
f"""<span fgcolor=\'{BLUE}\'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(UpperCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCamelCase_ )
__UpperCAmelCase : str = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase_ ) )
__UpperCAmelCase : int = Square(0.3 )
input.set_fill(UpperCamelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCamelCase_ , buff=0.5 )
self.play(Write(UpperCamelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCamelCase_ , buff=0.0_2 )
self.play(MoveToTarget(UpperCamelCase_ ) )
self.play(FadeOut(UpperCamelCase_ ) )
__UpperCAmelCase : List[Any] = Arrow(start=UpperCamelCase_ , end=UpperCamelCase_ , color=UpperCamelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCamelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__UpperCAmelCase : List[Any] = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase_ , run_time=3 ) )
__UpperCAmelCase : List[str] = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2}
self.play(
Write(UpperCamelCase_ ) , Circumscribe(model_arr[0] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCamelCase_ , **UpperCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__UpperCAmelCase : str = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , UpperCamelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
__UpperCAmelCase : Optional[Any] = AnimationGroup(
FadeOut(UpperCamelCase_ , run_time=0.5 ) , MoveToTarget(UpperCamelCase_ , run_time=0.5 ) , FadeIn(UpperCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCamelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__UpperCAmelCase : List[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCamelCase_ , **UpperCamelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCamelCase_ , **UpperCamelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCamelCase_ , **UpperCamelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__UpperCAmelCase : str = a_c
__UpperCAmelCase : Any = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(UpperCamelCase_ ) , FadeOut(UpperCamelCase_ , run_time=0.5 ) , )
__UpperCAmelCase : Optional[int] = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase_ , run_time=3 ) , MoveToTarget(UpperCamelCase_ ) )
self.wait()
| 713 | '''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix: NDArray[float64] , constant_matrix: NDArray[float64] , init_val: list[float] , iterations: int , ) -> list[float]:
    """simple docstring"""
    rows1 , cols1 = coefficient_matrix.shape
    rows2 , cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg )
    if cols2 != 1:
        msg = f"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"""matrix but received {len(init_val )} and {rows1}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            current = (temp + val) / denom
            new_val.append(current )
        init_val = new_val
    return [float(i ) for i in new_val]
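# Worked example: with A = [[4, 1, 1], [1, 5, 2], [1, 2, 4]], b = [2, -6, -4]
# and initial guess [0.5, -0.5, -0.5], three Jacobi sweeps yield
# [0.909375, -1.14375, -0.7484375].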
def strictly_diagonally_dominant( table ) -> bool:
    """simple docstring"""
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class __A (TrainingArguments ):
    label_smoothing: Optional[float] = field(
        default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    sortish_sampler: bool = field(default=False , metadata={"help": "Whether to SortishSamler or not."} )
    predict_with_generate: bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    adafactor: bool = field(default=False , metadata={"help": "whether to use adafactor"} )
    encoder_layerdrop: Optional[float] = field(
        default=None , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
    decoder_layerdrop: Optional[float] = field(
        default=None , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
    dropout: Optional[float] = field(default=None , metadata={"help": "Dropout probability. Goes into model.config."} )
    attention_dropout: Optional[float] = field(
        default=None , metadata={"help": "Attention dropout probability. Goes into model.config."} )
    lr_scheduler: Optional[str] = field(
        default="linear" , metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
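# Sketch: because this extends TrainingArguments, HfArgumentParser exposes each
# field as a CLI flag, e.g. (hypothetical) --sortish_sampler --lr_scheduler linear.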
| 714 | '''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius( lowerCamelCase__ ) -> int:
    """simple docstring"""
    factors = prime_factors(lowerCamelCase__ )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
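# Illustrative values (assuming prime_factors returns factors with multiplicity):
# mobius(10) == 1 (two distinct primes), mobius(30) == -1 (three distinct primes),
# mobius(24) == 0 (24 is divisible by a square).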
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |