code (stringlengths 82–53.2k) | code_codestyle (int64 0–721) | style_context (stringlengths 91–41.9k) | style_context_codestyle (int64 0–699) | label (int64 0–1)
---|---|---|---|---
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field( input_text , convert_value=None , default=None , error_message=None ) -> Union[str, Any]:
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options( input_text , options=[] , convert_value=None , default_choice=0 ) -> List[Any]:
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment( value ) -> Tuple:
    value = int(value )
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def _convert_distributed_mode( value ) -> Union[str, Any]:
    value = int(value )
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def _convert_dynamo_backend( value ) -> str:
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision( value ) -> Tuple:
    value = int(value )
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def _convert_sagemaker_distributed_mode( value ) -> Union[str, Any]:
    value = int(value )
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def _convert_yes_no_to_bool( value ) -> List[str]:
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter( argparse.RawDescriptionHelpFormatter ):
    '''simple docstring'''
    def _format_usage( self , usage , actions , groups , prefix ) -> Dict:
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace('''<command> [<args>] ''' , '''''' )
        return usage
| 90 |
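The helpers in the row above back accelerate's interactive `config` questionnaire. A minimal usage sketch, assuming the deobfuscated names restored above (the prompt text is illustrative, not from the source):

# Ask a yes/no question, retrying until the converter accepts the reply.
use_cpu = _ask_field(
    "Do you want to run on CPU only? [yes/NO]: ",
    convert_value=_convert_yes_no_to_bool,
    default=False,
    error_message="Please enter yes or no.",
)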
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution( limit : int = 1_50_00_00 ):
    '''simple docstring'''
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2 ):
            if gcd(euclid_m, euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 119 | 0 |
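A quick check of the Euclid parametrization the sieve above relies on (a = m*m - n*n, b = 2*m*n, c = m*m + n*n, so the primitive perimeter is 2*m*(m + n)):

m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
assert (a, b, c) == (3, 4, 5) and a * a + b * b == c * c
assert a + b + c == 2 * m * (m + n)  # the step size used by the sieve above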
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig( PretrainedConfig ):
    model_type = """deit"""
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=2_24 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def lowerCamelCase_ ( self) -> float:
'''simple docstring'''
return 1e-4
| 217 | """simple docstring"""
class CircularQueue:
    def __init__( self , n) -> None:
        '''simple docstring'''
        self.n = n
        self.array = [None] * self.n
        self.front = 0 # index of the first element
        self.rear = 0
        self.size = 0
    def __len__( self) -> int:
        '''simple docstring'''
        return self.size
    def is_empty( self) -> bool:
        '''simple docstring'''
        return self.size == 0
    def first( self) -> Optional[int]:
        '''simple docstring'''
        return False if self.is_empty() else self.array[self.front]
    def enqueue( self , data) -> Union[str, Any]:
        '''simple docstring'''
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue( self) -> str:
        '''simple docstring'''
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 217 | 1 |
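A quick usage sketch of the ring buffer above, assuming the deobfuscated names restored there:

queue = CircularQueue(3)
queue.enqueue("a").enqueue("b")   # enqueue returns self, so calls chain
print(len(queue), queue.first())  # 2 a
print(queue.dequeue(), queue.dequeue(), queue.is_empty())  # a b True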
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester( unittest.TestCase ):
    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ) -> None:
        '''simple docstring'''
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self ) -> List[str]:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self ) -> None:
        '''simple docstring'''
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
@property
    def image_processor_dict(self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''clusters''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
snake_case_ : Dict = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(__lowerCamelCase , obj[key] ) )
else:
self.assertEqual(obj[key] , __lowerCamelCase )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Union[str, Any] = os.path.join(__lowerCamelCase , '''image_processor.json''' )
image_processor_first.to_json_file(__lowerCamelCase )
snake_case_ : int = self.image_processing_class.from_json_file(__lowerCamelCase ).to_dict()
snake_case_ : Dict = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__lowerCamelCase , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __lowerCamelCase )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(__lowerCamelCase )
snake_case_ : Optional[Any] = self.image_processing_class.from_pretrained(__lowerCamelCase ).to_dict()
snake_case_ : Optional[int] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__lowerCamelCase , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __lowerCamelCase )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
pass
def prepare_images( ) -> Tuple:
    """simple docstring"""
    dataset = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
    image1 = Image.open(dataset[4]['''file'''] )
    image2 = Image.open(dataset[5]['''file'''] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
snake_case_ : List[str] = prepare_images()
# test non-batched
snake_case_ : List[Any] = image_processing(images[0] , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
snake_case_ : List[str] = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , __lowerCamelCase )
# test batched
snake_case_ : List[str] = image_processing(__lowerCamelCase , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
snake_case_ : Optional[int] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , __lowerCamelCase )
| 60 |
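The processor exercised above turns pixels into token ids by snapping each normalized RGB value to its nearest colour cluster. A minimal numpy sketch of that quantization step (the 2-entry palette here is toy data, not the real ImageGPT palette):

import numpy as np

clusters = np.asarray([[0.89, 0.66, 0.39], [-0.60, -0.02, 0.54]])

def color_quantize(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    """Map each RGB pixel of shape (n, 3) to the index of its nearest cluster."""
    d = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)  # (n, k) squared distances
    return d.argmin(axis=1)

pixels = np.array([[0.9, 0.7, 0.4], [-0.5, 0.0, 0.5]])
print(color_quantize(pixels, clusters))  # [0 1]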
edges = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
vertices = ['''a''', '''b''', '''c''', '''d''', '''e''']
def topological_sort( start , visited , sort ):
    '''simple docstring'''
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('''a''', [], [])
print(sort)
| 493 | 0 |
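For contrast, a BFS-based topological sort (Kahn's algorithm) over the same graph shape; this is an alternative technique, not part of the original row:

from collections import deque

def kahn_topological_sort(vertices, edges):
    # Count incoming edges for every vertex.
    indegree = {v: 0 for v in vertices}
    for v in vertices:
        for w in edges[v]:
            indegree[w] += 1
    # Repeatedly emit vertices with no remaining prerequisites.
    queue = deque(v for v in vertices if indegree[v] == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for w in edges[v]:
            indegree[w] -= 1
            if indegree[w] == 0:
                queue.append(w)
    return order

print(kahn_topological_sort(["a", "b", "c", "d", "e"], {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}))
# ['a', 'c', 'b', 'd', 'e']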
'''simple docstring'''
import os
import pytest
from attr import dataclass
lowercase : Optional[int] = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
    hyperparameters = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 16,
'''per_device_eval_batch_size''': 16,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 500,
'''save_steps''': 5500,
}
    distributed_hyperparameters = {**hyperparameters, '''max_steps''': 1000}
@property
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
return F'{self.framework}-transfromers-test'
@property
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
return F'./tests/sagemaker/scripts/{self.framework}'
@property
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''' )
def lowerCAmelCase_ ( request ):
    '''simple docstring'''
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 343 |
'''simple docstring'''
def solution( length : int = 50 ) -> int:
    '''simple docstring'''
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 343 | 1 |
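A quick sanity check for the row above, assuming it implements Project Euler 116: the problem statement's worked example counts 7 (length-2 tiles) + 3 (length-3) + 2 (length-4) = 12 tilings for a row of length 5.

assert solution(5) == 12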
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums : Sequence[int] | None = None ) -> int:
    '''simple docstring'''
    if nums is None or not nums:
        raise ValueError('''Input sequence should not be empty''' )
    ans = nums[0]
    for i in range(1 , len(nums ) ):
        num = nums[i]
        ans = max(ans , ans + num , num )
    return ans
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
    print(max_subsequence_sum(array))
| 22 |
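Two small checks of the Kadane implementation above (the classic example's best subarray is [4, -1, 2, 1] with sum 6):

assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subsequence_sum([-3, -7, -1]) == -1  # all-negative input returns the largest element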
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_bridgetower'] = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bridgetower'] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 459 | 0 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """simple docstring"""
    def __init__( self , text , pattern ) -> None:
        self.text , self.pattern = text, pattern
        self.textLen , self.patLen = len(text ), len(pattern )
    def match_in_pattern( self , char ) -> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self , current_pos ) -> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self ) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                ) # shifting index lgtm [py/multiple-definition]
        return positions
__snake_case = 'ABAABA'
__snake_case = 'AB'
__snake_case = BoyerMooreSearch(text, pattern)
__snake_case = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 709 |
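For the demo inputs above (text = "ABAABA", pattern = "AB") the pattern occurs at indices 0 and 3, so the script should print:

Pattern found in following positions: 
[0, 3]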
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce the amount of console output from TF
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
    print('transformers version:', None)
| 128 | 0 |
from __future__ import annotations
from collections import deque
class Automaton:
    '''simple docstring'''
    def __init__( self: Optional[Any] , keywords: list[str] ):
        self.adlist = []
        self.adlist.append(
            {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
    def find_next_state( self: Tuple , current_state: int , char: str ):
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword( self: int , keyword: str ):
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        '''value''': character,
                        '''next_states''': [],
                        '''fail_state''': 0,
                        '''output''': [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self: str ):
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]['''fail_state''']
                while (
                    self.find_next_state(state , self.adlist[child]['''value'''] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]['''fail_state''']
                self.adlist[child]['''fail_state'''] = self.find_next_state(
                    state , self.adlist[child]['''value'''] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]['''fail_state'''] = 0
                self.adlist[child]['''output'''] = (
                    self.adlist[child]['''output''']
                    + self.adlist[self.adlist[child]['''fail_state''']]['''output''']
                )
    def search_in( self: List[Any] , string: str ):
        result = {} # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['''fail_state''']
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key ) + 1 )
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 234 |
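A quick usage sketch on the classic Aho-Corasick example, assuming the deobfuscated Automaton above:

auto = Automaton(["he", "she", "his", "hers"])
print(auto.search_in("ahishers"))
# {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}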
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang ):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
SCREAMING_SNAKE_CASE_ = F"{src_lang}-{tgt_lang}"
SCREAMING_SNAKE_CASE_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
    path = os.path.join(model_card_dir , '''README.md''' )
print(F"Generating {path}" )
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(_lowerCAmelCase )
# make sure we are under the root of the project
__SCREAMING_SNAKE_CASE =Path(__file__).resolve().parent.parent.parent
__SCREAMING_SNAKE_CASE =repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("""-""")
__SCREAMING_SNAKE_CASE =model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 234 | 1 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase_ :
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "Model type selected in the list: " + ", ".join(UpperCamelCase__ )} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
lowerCamelCase_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase_ = field(
default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
lowerCamelCase_ = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
lowerCamelCase_ = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCamelCase_ = field(
default=UpperCamelCase__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
lowerCamelCase_ = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
lowerCamelCase_ = field(
default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
lowerCamelCase_ = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
lowerCamelCase_ = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "train"
lowerCamelCase_ = "dev"
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
    def __init__( self :Dict , args :SquadDataTrainingArguments , tokenizer :PreTrainedTokenizer , limit_length :Optional[int] = None , mode :Union[str, Split] = Split.train , is_language_sensitive :Optional[bool] = False , cache_dir :Optional[str] = None , dataset_format :Optional[str] = "pt" , ) -> List[str]:
        """simple docstring"""
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = """v2""" if args.version_2_with_negative else """v1"""
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["""features"""]
                self.dataset = self.old_features.get("""dataset""" , None )
                self.examples = self.old_features.get("""examples""" , None )
                logger.info(
                    f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        """ future run""" )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self :int ) -> Union[str, Any]:
"""simple docstring"""
return len(self.features )
    def __getitem__( self :Any , i :Dict ) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": attention_mask,
            """token_type_ids""": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({"""is_impossible""": is_impossible} )
        if self.is_language_sensitive:
            inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
        return inputs
| 703 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor( LayoutLMv2ImageProcessor ):
    def __init__( self :List[Any] , *args :Tuple , **kwargs :Dict ) -> None:
        """simple docstring"""
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 59 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
class TimmBackboneConfig( PretrainedConfig ):
    model_type = 'timm_backbone'
    def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 273 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def rename_keys( s_dict ):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys() )
    for key in keys:
        layer_regex = r'.*/layers_(\d+)'
        new_key = key
        if re.match(layer_regex , key ):
            new_key = re.sub(r'layers_(\d+)' , r'block/\1/layer' , new_key )
        encoder_or_decoder_regex = r'(encoder|decoder)\/'
        if re.match(encoder_or_decoder_regex , key ):
            groups = re.match(encoder_or_decoder_regex , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r'/mlp/' , r'/1/mlp/' , new_key )
                new_key = re.sub(r'/pre_mlp_layer_norm/' , r'/1/layer_norm/' , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(r'/mlp/' , r'/2/mlp/' , new_key )
                new_key = re.sub(r'/pre_mlp_layer_norm/' , r'/2/layer_norm/' , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'] = s_dict[
            'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'] = s_dict[
            'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                s_dict[key.replace('expert/' , f'experts/expert_{idx}/' )] = expert_weights[idx]
            print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
            s_dict.pop(key )
    return s_dict
GIN_TO_CONFIG_MAPPING = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config( gin_file , num_experts ):
    # Convert a google style config to the hugging face format
    import regex as re
    with open(gin_file , 'r' ) as f:
        raw_gin = f.read()
    regex_match = re.findall(r'(.*) = ([0-9.]*)' , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '.' in value else int(value )
    activation = re.findall(r'(.*activations) = \(\'(.*)\',\)' , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args['num_experts'] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    # Initialise PyTorch model
    print(f'Loading flax weights from : {flax_checkpoint_path}' )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params['target']
    flax_params = flatten_dict(flax_params , sep='/' )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep='/' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
a : List[str] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 273 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits :int = 3 ):
    """simple docstring"""
    if isinstance(number_of_qubits , str ):
        raise TypeError("""number of qubits must be a integer.""" )
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""" )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("""number of qubits must be exact integer.""" )
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate(>10).""" )
    qr = QuantumRegister(number_of_qubits , """qr""" )
    cr = ClassicalRegister(number_of_qubits , """cr""" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(counter ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""" )
    job = execute(quantum_circuit , backend , shots=1_0000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
| 710 |
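A rough sanity check, assuming the deobfuscated quantum_fourier_transform above: the default |000> input maps to an equal superposition, so the 10,000 shots should split roughly evenly (about 1,250 each) across all eight 3-bit outcomes.

counts = quantum_fourier_transform(3)
assert sum(counts.values()) == 10_000
assert len(counts) == 8  # every outcome appears, with overwhelming probability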
import argparse
import datetime
def zeller(date_input :str ) -> str:
    """simple docstring"""
    days = {
        """0""": """Sunday""",
        """1""": """Monday""",
        """2""": """Tuesday""",
        """3""": """Wednesday""",
        """4""": """Thursday""",
        """5""": """Friday""",
        """6""": """Saturday""",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError("""Must be 10 characters long""" )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("""Month must be between 1 - 12""" )
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("""Date must be between 1 - 31""" )
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""" )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
    # Response
    response = f"""Your date {date_input}, is a {days[str(f )]}!"""
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
UpperCamelCase__ = parser.parse_args()
zeller(args.date_input)
| 548 | 0 |
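A worked example, assuming the deobfuscated zeller above (31 January 2010 fell on a Sunday):

print(zeller("01-31-2010"))  # Your date 01-31-2010, is a Sunday!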
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = KandinskyVaaPriorPipeline
SCREAMING_SNAKE_CASE__ : Dict = ['prompt']
SCREAMING_SNAKE_CASE__ : Optional[int] = ['prompt', 'negative_prompt']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE__ : str = False
@property
def A_ ( self ):
'''simple docstring'''
return 3_2
@property
def A_ ( self ):
'''simple docstring'''
return 3_2
@property
def A_ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def A_ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self ):
'''simple docstring'''
return 1_0_0
@property
def A_ ( self ):
'''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer
@property
def A_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(lowerCamelCase__ )
@property
def A_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 1_2,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
@property
def A_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
UpperCAmelCase : Optional[Any] = CLIPVisionModelWithProjection(lowerCamelCase__ )
return model
@property
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=lowerCamelCase__ , do_normalize=lowerCamelCase__ , do_resize=lowerCamelCase__ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def A_ ( self ):
'''simple docstring'''
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=True , clip_sample_range=10.0 , )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components
def A_ ( self , snake_case , snake_case=0 ):
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("mps" ):
UpperCAmelCase : Optional[int] = torch.manual_seed(lowerCamelCase__ )
else:
UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
UpperCAmelCase : Any = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = "cpu"
UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase : List[str] = self.pipeline_class(**lowerCamelCase__ )
UpperCAmelCase : Dict = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
UpperCAmelCase : List[Any] = output.image_embeds
UpperCAmelCase : Optional[Any] = pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0]
UpperCAmelCase : Tuple = image[0, -1_0:]
UpperCAmelCase : int = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
UpperCAmelCase : Optional[Any] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = torch_device == "cpu"
UpperCAmelCase : Union[str, Any] = True
UpperCAmelCase : Dict = False
self._test_inference_batch_single_identical(
test_max_difference=lowerCamelCase__ , relax_max_difference=lowerCamelCase__ , test_mean_pixel_difference=lowerCamelCase__ , )
@skip_mps
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = torch_device == "cpu"
UpperCAmelCase : Optional[int] = False
self._test_attention_slicing_forward_pass(
test_max_difference=lowerCamelCase__ , test_mean_pixel_difference=lowerCamelCase__ , )
| 679 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env( key , default=False ):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no." )
    return _value
__lowercase = parse_flag_from_env("""RUN_SLOW""", default=False)
__lowercase = parse_flag_from_env("""RUN_REMOTE""", default=False)
__lowercase = parse_flag_from_env("""RUN_LOCAL""", default=True)
__lowercase = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
__lowercase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
__lowercase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
__lowercase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
__lowercase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
__lowercase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
__lowercase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
__lowercase = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
A_ = unittest.skip('''test requires faiss''' )(SCREAMING_SNAKE_CASE )
return test_case
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
A_ = unittest.skip('''test requires regex''' )(SCREAMING_SNAKE_CASE )
return test_case
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
A_ = unittest.skip('''test requires elasticsearch''' )(SCREAMING_SNAKE_CASE )
return test_case
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
A_ = unittest.skip('''test requires sqlalchemy''' )(SCREAMING_SNAKE_CASE )
return test_case
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
A_ = unittest.skip('''test requires PyTorch''' )(SCREAMING_SNAKE_CASE )
return test_case
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not config.TF_AVAILABLE:
A_ = unittest.skip('''test requires TensorFlow''' )(SCREAMING_SNAKE_CASE )
return test_case
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
A_ = unittest.skip('''test requires JAX''' )(SCREAMING_SNAKE_CASE )
return test_case
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
A_ = unittest.skip('''test requires Pillow''' )(SCREAMING_SNAKE_CASE )
return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
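

# Usage sketch for `for_all_test_methods` (the test class below is
# hypothetical): every attribute whose name starts with "test" is wrapped by
# each decorator in turn, so the whole class can be gated at once, e.g.
#
#     @for_all_test_methods(slow, require_not_windows)
#     class MyDatasetTests(unittest.TestCase):
#         def test_load(self):
#             ...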
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
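

# Usage sketch (illustrative): wrap any code that would normally hit the
# network, e.g.
#
#     with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
#         load_dataset("squad")  # raises a timeout error instead of hanging
#
# CONNECTION_FAILS raises requests.ConnectionError immediately, while
# HF_DATASETS_OFFLINE_SET_TO_1 exercises the library's own offline code path.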
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
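

# Usage sketch (illustrative): run a child Python process and capture/echo
# its output line by line, e.g.
#
#     result = execute_subprocess_async(
#         [sys.executable, "-c", "print('hello from a worker')"], timeout=30
#     )
#     # `result` is a _RunOutput whose `.stdout` / `.stderr` are lists of
#     # decoded lines, so result.stdout == ["hello from a worker"] here.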
def pytest_xdist_worker_id():
    # Returns the numerical id of the current pytest-xdist worker ("gw0" -> 0),
    # or 0 when pytest-xdist isn't being used.
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    # Returns a port usable as torch.distributed's master_port; under
    # pytest-xdist a per-worker delta keeps concurrent tests from colliding.
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the stored options differ from the
        # requested ones (same pattern as the BERT fast tokenizer).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
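

# Usage sketch (requires network access to fetch the vocab; output below is
# described, not asserted):
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     enc = tokenizer("Hello world", "Second segment")
#     # enc["input_ids"] is [CLS] A [SEP] B [SEP]; enc["token_type_ids"] marks
#     # segment A with 0s and segment B with 1s, matching the methods above.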
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
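

# Minimal standalone sketch of the scheduler API exercised above (the UNet is
# replaced by a zero-valued stand-in; shapes are illustrative):
#
#     scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet call
#         sample = scheduler.step(noise_pred, t, sample).prev_sample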
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
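

# Launch sketch (assuming the script is saved as gradient_accumulation.py):
# running it through the Accelerate CLI lets the same code cover single-GPU,
# multi-GPU, and TPU; the flags match the parser above.
#
#     accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4
#
# With 4 accumulation steps, `accelerator.accumulate(model)` skips the
# optimizer/scheduler updates and gradient sync on 3 of every 4 batches,
# giving an effective batch size of 4 * batch_size.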
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
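
# Sketch of the effect (illustrative): `_LazyModule` replaces this module in
# `sys.modules`, so
#
#     from transformers.models.graphormer import GraphormerModel
#
# resolves the name lazily -- `modeling_graphormer` (and therefore torch) is
# only imported at that first attribute access, not when the package loads.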
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
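

# Standalone usage sketch, outside of unittest (downloads a tiny model; the
# result fields printed below mirror what the tests above inspect):
#
#     args = PyTorchBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     )
#     results = PyTorchBenchmark(args).run()
#     print(results.time_inference_result, results.memory_inference_result)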
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
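

# Usage sketch (the path, tokenizer choice, and max_seq_length below are
# illustrative; train.jsonl is assumed to hold one {"text", "img", "label"}
# record per line):
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     dataset = JsonlDataset(
#         "mmimdb/train.jsonl", tokenizer, get_image_transforms(),
#         get_mmimdb_labels(), max_seq_length=80,
#     )
#     loader = torch.utils.data.DataLoader(
#         dataset, batch_size=8, shuffle=True, collate_fn=collate_fn
#     )
#     text, mask, img, img_start, img_end, tgt = next(iter(loader))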
"""simple docstring"""
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
_SCREAMING_SNAKE_CASE : Union[str, Any] = F'''https://www.google.com/search?q={query}&num=100'''
_SCREAMING_SNAKE_CASE : Dict = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
_SCREAMING_SNAKE_CASE : Optional[int] = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
_SCREAMING_SNAKE_CASE : Dict = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
_SCREAMING_SNAKE_CASE : Dict = 637_8137.0
_SCREAMING_SNAKE_CASE : Any = 635_6752.31_4245
_SCREAMING_SNAKE_CASE : List[Any] = 637_8137
def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> float:
lowerCamelCase_ = (AXIS_A - AXIS_B) / AXIS_A
lowerCamelCase_ = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )
lowerCamelCase_ = atan((1 - flattening) * tan(radians(_lowerCamelCase ) ) )
lowerCamelCase_ = radians(_lowerCamelCase )
lowerCamelCase_ = radians(_lowerCamelCase )
# Equation
lowerCamelCase_ = sin((phi_a - phi_a) / 2 )
lowerCamelCase_ = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
lowerCamelCase_ = sqrt(sin_sq_phi + (cos(_lowerCamelCase ) * cos(_lowerCamelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
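
# Worked example (coordinates are approximate; the expected value is a rough
# sanity check, not an authoritative geodesic):
#
#     SAN_FRANCISCO = (37.774856, -122.424227)
#     YOSEMITE = (37.864742, -119.537521)
#     haversine_distance(*SAN_FRANCISCO, *YOSEMITE)
#
# returns roughly 254,000 metres, i.e. about 254 km of great-circle distance.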
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""
class FlaxLogitsProcessor:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class __A ( lowerCamelCase__ ):
"""simple docstring"""
    def __init__(self, generate_config, model_config, decoder_input_length):
        """simple docstring"""
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__(self, input_ids, scores, cur_len):
        """simple docstring"""
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf")), scores)

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
return scores
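

# --- Usage sketch (illustrative; not part of the original module) ------------------
# Shows how a min-length processor of the kind defined above masks the EOS logit
# until `cur_len` reaches `min_length`. Shapes and token ids are made-up examples;
# `jnp` is assumed to be imported at the top of this module.
if __name__ == "__main__":
    scores = jnp.zeros((2, 10))  # (batch_size, vocab_size)
    eos_token_id, min_length, cur_len = 3, 5, 2

    apply_penalty = 1 - jnp.clip(cur_len - min_length, 0, 1)
    masked = jnp.where(apply_penalty, scores.at[:, eos_token_id].set(-float("inf")), scores)
    # EOS is blocked while cur_len (2) < min_length (5)
    assert bool((masked[:, eos_token_id] == -float("inf")).all())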
| 114 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
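

# --- Migration sketch (illustrative) ------------------------------------------------
# The deprecation above points to `DeiTImageProcessor`; a minimal, hypothetical
# migration (the checkpoint name is an example):
#
#     from transformers import DeiTImageProcessor
#
#     image_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     inputs = image_processor(images=image, return_tensors="pt")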
| 114 | 1 |
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    '''simple docstring'''
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
| 709 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    '''simple docstring'''
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
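

# Note (assumption): each line of a whole-word-masking ref file is expected to be a
# JSON list of sub-token positions that belong to the same Chinese word, e.g. a line
# such as `[2, 3, 5]`, typically produced by a companion `run_chinese_ref.py` script.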
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __magic_name__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
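

# --- Example invocation (illustrative; all paths and names are placeholders) --------
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file ./train.txt \
#       --train_ref_file ./train_ref.txt \
#       --do_train \
#       --output_dir ./output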
| 419 | 0 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    '''simple docstring'''
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
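

# --- Example invocation (illustrative; script name and paths are placeholders) -------
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt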
| 109 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    '''simple docstring'''
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
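

# --- Mapping sketch (illustrative; the weight name below is a made-up example) -------
# How the MAPPING table above rewrites a fairseq parameter name, including the
# layer-index substitution for the "*" placeholder:
def _demo_map(name="encoder.layers.3.self_attn.k_proj.weight"):
    for key, mapped_key in MAPPING.items():
        if key in name:
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            return mapped_key
    return None


# _demo_map() -> "encoder.layers.3.attention.k_proj"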
| 109 | 1 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class snake_case_ ( unittest.TestCase ):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def UpperCAmelCase__ ( self : Union[str, Any] )->str:
'''simple docstring'''
__lowerCAmelCase : int = 0
def UpperCAmelCase__ ( self : List[str] )->Tuple:
'''simple docstring'''
__lowerCAmelCase : int = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : List[Any] )->Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Optional[int] = WavaVecaConfig()
__lowerCAmelCase : Tuple = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
__lowerCAmelCase : int = AutoProcessor.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : str )->Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_snake_case , os.path.join(_snake_case , _snake_case ) )
copyfile(_snake_case , os.path.join(_snake_case , """vocab.json""" ) )
__lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : Optional[Any] )->Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Any = WavaVecaFeatureExtractor()
__lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
__lowerCAmelCase : List[Any] = WavaVecaProcessor(_snake_case , _snake_case )
# save in new folder
processor.save_pretrained(_snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(_snake_case , _snake_case ) , """r""" ) as f:
__lowerCAmelCase : Dict = json.load(_snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(_snake_case , _snake_case ) , """w""" ) as f:
f.write(json.dumps(_snake_case ) )
__lowerCAmelCase : List[str] = AutoProcessor.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : Any )->Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Tuple = WavaVecaFeatureExtractor()
__lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
__lowerCAmelCase : Union[str, Any] = WavaVecaProcessor(_snake_case , _snake_case )
# save in new folder
processor.save_pretrained(_snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(_snake_case , _snake_case ) , """r""" ) as f:
__lowerCAmelCase : Any = json.load(_snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(_snake_case , _snake_case ) , """w""" ) as f:
f.write(json.dumps(_snake_case ) )
__lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : Optional[int] )->Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : str = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(_snake_case )
# copy relevant files
copyfile(_snake_case , os.path.join(_snake_case , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(_snake_case , _snake_case ) , """w""" ) as f:
f.write("""{}""" )
__lowerCAmelCase : int = AutoProcessor.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : Optional[Any] )->Dict:
'''simple docstring'''
with self.assertRaises(_snake_case ):
__lowerCAmelCase : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_snake_case ):
__lowerCAmelCase : str = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_snake_case )
__lowerCAmelCase : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
__lowerCAmelCase : Union[str, Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
__lowerCAmelCase : Union[str, Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowerCAmelCase : str = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_snake_case , use_fast=_snake_case )
__lowerCAmelCase : str = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def UpperCAmelCase__ ( self : int )->int:
'''simple docstring'''
try:
AutoConfig.register("""custom""" , _snake_case )
AutoFeatureExtractor.register(_snake_case , _snake_case )
AutoTokenizer.register(_snake_case , slow_tokenizer_class=_snake_case )
AutoProcessor.register(_snake_case , _snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_snake_case ):
AutoProcessor.register(_snake_case , _snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCAmelCase : Optional[int] = CustomFeatureExtractor.from_pretrained(_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : Dict = os.path.join(_snake_case , """vocab.txt""" )
with open(_snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
__lowerCAmelCase : Optional[int] = CustomTokenizer(_snake_case )
__lowerCAmelCase : str = CustomProcessor(_snake_case , _snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_snake_case )
__lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase__ ( self : List[Any] )->int:
'''simple docstring'''
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False
try:
AutoConfig.register("""custom""" , _snake_case )
AutoFeatureExtractor.register(_snake_case , _snake_case )
AutoTokenizer.register(_snake_case , slow_tokenizer_class=_snake_case )
AutoProcessor.register(_snake_case , _snake_case )
# If remote code is not set, the default is to use local classes.
__lowerCAmelCase : Any = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__lowerCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
__lowerCAmelCase : str = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase__ ( self : Tuple )->str:
'''simple docstring'''
__lowerCAmelCase : int = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def UpperCAmelCase__ ( self : Union[str, Any] )->Any:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
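

# --- Registration sketch (illustrative) ----------------------------------------------
# The pattern the tests above exercise: tie a custom config class to custom
# processing classes so that AutoProcessor can resolve them (class names are the
# test module's own fixtures):
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#     AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#     AutoProcessor.register(CustomConfig, CustomProcessor)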
@is_staging_test
class snake_case_ ( unittest.TestCase ):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def UpperCAmelCase__ ( cls : str )->Optional[int]:
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
def UpperCAmelCase__ ( cls : str )->Optional[Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Dict )->Dict:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = WavaVecaProcessor.from_pretrained(_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_snake_case , """test-processor""" ) , push_to_hub=_snake_case , use_auth_token=self._token )
__lowerCAmelCase : str = WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_snake_case , getattr(new_processor.feature_extractor , _snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCAmelCase__ ( self : Optional[int] )->int:
'''simple docstring'''
__lowerCAmelCase : Tuple = WavaVecaProcessor.from_pretrained(_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_snake_case , """test-processor-org""" ) , push_to_hub=_snake_case , use_auth_token=self._token , organization="""valid_org""" , )
__lowerCAmelCase : Optional[Any] = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_snake_case , getattr(new_processor.feature_extractor , _snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCAmelCase__ ( self : Tuple )->str:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__lowerCAmelCase : int = CustomFeatureExtractor.from_pretrained(_snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : int = os.path.join(_snake_case , """vocab.txt""" )
with open(_snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
__lowerCAmelCase : Union[str, Any] = CustomTokenizer(_snake_case )
__lowerCAmelCase : List[Any] = CustomProcessor(_snake_case , _snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'''{USER}/test-dynamic-processor''' , token=self._token )
__lowerCAmelCase : str = Repository(_snake_case , clone_from=F'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(_snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_snake_case , """tokenizer_config.json""" ) ) as f:
__lowerCAmelCase : int = json.load(_snake_case )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_snake_case , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_snake_case , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_snake_case , """custom_processing.py""" ) ) )
repo.push_to_hub()
__lowerCAmelCase : Optional[Any] = AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''' , trust_remote_code=_snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" ) | 712 |
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
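

# --- Examples (illustrative) ----------------------------------------------------------
# The DP table takes O(len(text1) * len(text2)) time and space. Quick checks:
#     assert longest_common_substring("", "abc") == ""
#     assert longest_common_substring("abcdgh", "abedfhr") == "ab"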
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 240 | 0
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        """simple docstring"""
        return TaConfig.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        """simple docstring"""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self):
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """simple docstring"""
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        """simple docstring"""
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        """simple docstring"""
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        """simple docstring"""
        self.model_tester = UMTaModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def lowercase__ ( self ):
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__A )
def lowercase__ ( self ):
"""simple docstring"""
a__ = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
a__ = self.model_tester.prepare_config_and_inputs()
a__ = config_and_inputs[0]
a__ = UMTaForConditionalGeneration(__A ).eval()
model.to(__A )
a__ = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__A ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__A ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__A ),
}
for attn_name, (name, mask) in zip(__A , head_masking.items() ):
a__ = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
a__ = torch.ones(
config.num_decoder_layers , config.num_heads , device=__A )
a__ = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=__A , return_dict_in_generate=__A , **__A , )
# We check the state of decoder_attentions and cross_attentions just from the last step
a__ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
def lowercase__ ( self ):
"""simple docstring"""
a__ = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=__A ).to(__A )
a__ = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=__A , legacy=__A )
a__ = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
a__ = tokenizer(__A , return_tensors='pt' , padding=__A ).input_ids
# fmt: off
a__ = torch.tensor(
[
                [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333, 6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296, 274, 1],
] )
# fmt: on
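        # NOTE: torch.testing.assert_allclose is deprecated upstream in favor of
        # torch.testing.assert_close; kept here as written in the original test.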
torch.testing.assert_allclose(__A , __A )
a__ = model.generate(input_ids.to(__A ) )
a__ = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
a__ = tokenizer.batch_decode(__A )
self.assertEqual(__A , __A )
| 394 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
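# Sketch of the effect (assuming the standard transformers package layout):
# `_LazyModule` defers the heavy torch-backed imports listed above until an
# attribute such as `FalconModel` is first accessed, keeping the initial
# `import transformers` cheap.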
| 443 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableDiffusionPanoramaPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
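        # All components below are deliberately tiny (32/64 channels, one layer
        # per block) so the full pipeline forward pass runs in seconds on CPU.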
_UpperCamelCase = DDIMScheduler()
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCamelCase = CLIPTextModel(_A )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase_ ( self : Optional[Any] , _A : str , _A : int=0 ):
_UpperCamelCase = torch.manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**_A )
_UpperCamelCase = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = self.get_dummy_inputs(_A )
_UpperCamelCase = sd_pipe(**_A ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : List[Any] ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Optional[int] ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**_A )
_UpperCamelCase = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = self.get_dummy_inputs(_A )
_UpperCamelCase = '''french fries'''
_UpperCamelCase = sd_pipe(**_A , negative_prompt=_A )
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**_A )
_UpperCamelCase = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = self.get_dummy_inputs(_A )
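        # `view_batch_size=2` processes two panorama views per UNet forward pass;
        # the decoded image should match the default single-view batching within
        # the test tolerance.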
_UpperCamelCase = sd_pipe(**_A , view_batch_size=2 )
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
_UpperCamelCase = StableDiffusionPanoramaPipeline(**_A )
_UpperCamelCase = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = self.get_dummy_inputs(_A )
_UpperCamelCase = sd_pipe(**_A ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_A )
_UpperCamelCase = StableDiffusionPanoramaPipeline(**_A )
_UpperCamelCase = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = self.get_dummy_inputs(_A )
_UpperCamelCase = sd_pipe(**_A ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Tuple , _A : Tuple=0 ):
_UpperCamelCase = torch.manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = '''stabilityai/stable-diffusion-2-base'''
_UpperCamelCase = DDIMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**_A ).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_UpperCamelCase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_A )
_UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**_A ).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_UpperCamelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = 0
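        # The pipeline invokes this callback once per denoising step with
        # (step, timestep, latents); latent slices are snapshotted at steps 1 and 2.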
def callback_fn(_A : int , _A : int , _A : torch.FloatTensor ) -> None:
_UpperCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_UpperCamelCase = False
_UpperCamelCase = '''stabilityai/stable-diffusion-2-base'''
_UpperCamelCase = DDIMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
pipe(**_A , callback=_A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCamelCase_ ( self : int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = '''stabilityai/stable-diffusion-2-base'''
_UpperCamelCase = DDIMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**_A )
_UpperCamelCase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 71 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (DPMSolverSDEScheduler,)
UpperCAmelCase = 10
def UpperCamelCase_ ( self : Tuple , **_A : Union[str, Any] ):
_UpperCamelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : List[Any] ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : List[Any] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
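        # Reference values differ slightly per backend, hence the mps/cuda/cpu
        # branches below; tolerances stay loose (1e-2 on the sum) for that reason.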
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for i, t in enumerate(scheduler.timesteps ):
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A , use_karras_sigmas=_A )
scheduler.set_timesteps(self.num_inference_steps , device=_A )
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma
_UpperCamelCase = sample.to(_A )
for t in scheduler.timesteps:
_UpperCamelCase = scheduler.scale_model_input(_A , _A )
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step(_A , _A , _A )
_UpperCamelCase = output.prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 71 | 1 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowercase :
def __init__( self , snake_case , snake_case=100 , snake_case=13 , snake_case=30 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=32 , snake_case=4 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=10 , snake_case=0.02 , snake_case=3 , snake_case=None , snake_case=[0, 1, 2, 3] , ):
snake_case_ = parent
snake_case_ = 100
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = out_indices
snake_case_ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = num_patches + 1
def a ( self ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def a ( self ):
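        # `out_indices` selects which encoder layers expose their hidden states
        # to the semantic-segmentation decode head of BeitForSemanticSegmentation.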
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def a ( self , snake_case , snake_case , snake_case , snake_case ):
snake_case_ = BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case_ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case ):
snake_case_ = BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case_ = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case ):
snake_case_ = self.type_sequence_label_size
snake_case_ = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case_ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a ( self , snake_case , snake_case , snake_case , snake_case ):
snake_case_ = self.num_labels
snake_case_ = BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
snake_case_ = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
snake_case_ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def a ( self ):
snake_case_ = self.prepare_config_and_inputs()
snake_case_ = config_and_inputs
snake_case_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : int = False
def a ( self ):
snake_case_ = BeitModelTester(self )
snake_case_ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def a ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def a ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def a ( self ):
pass
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(snake_case__ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def a ( self ):
if not self.model_tester.is_training:
return
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
snake_case_ = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
snake_case_ = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
snake_case_ = model(**snake_case__ ).loss
loss.backward()
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case_ = False
snake_case_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case_ = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
snake_case_ = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
snake_case_ = model(**snake_case__ ).loss
loss.backward()
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
snake_case_ = model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def a ( self ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __lowerCamelCase ( ):
'''simple docstring'''
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a ( self ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def a ( self ):
snake_case_ = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(snake_case__ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=snake_case__ , return_tensors='pt' ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
snake_case_ = torch.ones((1, 196) , dtype=torch.bool ).to(snake_case__ )
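        # 196 = (224 // 16) ** 2 patches for this patch16-224 checkpoint; masking
        # every patch exercises the full reconstruction head.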
# forward pass
with torch.no_grad():
snake_case_ = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
snake_case_ = outputs.logits
# verify the logits
snake_case_ = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , snake_case__ )
snake_case_ = torch.tensor(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def a ( self ):
snake_case_ = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(snake_case__ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
snake_case_ = model(**snake_case__ )
snake_case_ = outputs.logits
# verify the logits
snake_case_ = torch.Size((1, 1000) )
self.assertEqual(logits.shape , snake_case__ )
snake_case_ = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
snake_case_ = 281
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def a ( self ):
snake_case_ = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
snake_case__ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
snake_case_ = model(**snake_case__ )
snake_case_ = outputs.logits
# verify the logits
snake_case_ = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , snake_case__ )
snake_case_ = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
snake_case_ = 2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def a ( self ):
snake_case_ = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
snake_case_ = model.to(snake_case__ )
snake_case_ = BeitImageProcessor(do_resize=snake_case__ , size=640 , do_center_crop=snake_case__ )
snake_case_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
snake_case_ = Image.open(ds[0]['file'] )
snake_case_ = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
snake_case_ = model(**snake_case__ )
snake_case_ = outputs.logits
# verify the logits
snake_case_ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , snake_case__ )
snake_case_ = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
snake_case_ = torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
] , device=snake_case__ , )
else:
snake_case_ = torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def a ( self ):
snake_case_ = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
snake_case_ = model.to(snake_case__ )
snake_case_ = BeitImageProcessor(do_resize=snake_case__ , size=640 , do_center_crop=snake_case__ )
snake_case_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
snake_case_ = Image.open(ds[0]['file'] )
snake_case_ = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
snake_case_ = model(**snake_case__ )
snake_case_ = outputs.logits.detach().cpu()
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(500, 300)] )
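        # With `target_sizes`, the logits are resized to the requested (height,
        # width) per image; without it, they stay at the model's native resolution.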
snake_case_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , snake_case__ )
snake_case_ = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
snake_case_ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , snake_case__ )
| 362 |
"""simple docstring"""
def lowercase__(number ) ->bool:
    """Return True when ``number`` is even.

    >>> lowercase__(4)
    True
    >>> lowercase__(7)
    False
    """
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 218 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] , A__ : Optional[int] , A__ : str=1_3 , A__ : List[Any]=7 , A__ : Optional[Any]=True , A__ : Tuple=True , A__ : Union[str, Any]=True , A__ : Any=True , A__ : List[Any]=9_9 , A__ : Optional[Any]=3_2 , A__ : Dict=5 , A__ : Union[str, Any]=4 , A__ : int=3_7 , A__ : List[str]="gelu" , A__ : List[Any]=0.1 , A__ : List[str]=0.1 , A__ : Optional[int]=1_2_8 , A__ : Any=3_2 , A__ : List[Any]=1_6 , A__ : str=2 , A__ : Union[str, Any]=0.02 , A__ : Optional[int]=3 , A__ : Optional[int]=4 , A__ : Optional[int]=None , ) -> Any:
'''simple docstring'''
a__ : Union[str, Any] = parent
a__ : int = batch_size
a__ : Any = seq_length
a__ : List[Any] = is_training
a__ : Any = use_input_mask
a__ : Optional[int] = use_token_type_ids
a__ : List[Any] = use_labels
a__ : Union[str, Any] = vocab_size
a__ : Tuple = hidden_size
a__ : List[str] = num_hidden_layers
a__ : Optional[Any] = num_attention_heads
a__ : List[Any] = intermediate_size
a__ : int = hidden_act
a__ : Optional[Any] = hidden_dropout_prob
a__ : Any = attention_probs_dropout_prob
a__ : List[Any] = max_position_embeddings
a__ : int = type_vocab_size
a__ : Union[str, Any] = type_sequence_label_size
a__ : Union[str, Any] = initializer_range
a__ : int = num_labels
a__ : int = num_choices
a__ : Any = scope
def __lowerCAmelCase ( self : int ) -> Tuple:
'''simple docstring'''
a__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ : str = None
if self.use_input_mask:
a__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
a__ : int = None
if self.use_token_type_ids:
a__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a__ : str = None
a__ : List[str] = None
a__ : Dict = None
if self.use_labels:
a__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
a__ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) : Any = self.prepare_config_and_inputs()
a__ : Tuple = True
a__ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
a__ : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self : int , A__ : Dict , A__ : Optional[Any] , A__ : str , A__ : Union[str, Any] , A__ : Dict , A__ : Tuple , A__ : int ) -> Any:
'''simple docstring'''
a__ : Optional[int] = NezhaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a__ : Optional[int] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
a__ : Dict = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
a__ : Union[str, Any] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self : Optional[int] , A__ : Tuple , A__ : Dict , A__ : Optional[Any] , A__ : Dict , A__ : Union[str, Any] , A__ : int , A__ : Optional[Any] , A__ : List[Any] , A__ : Union[str, Any] , ) -> int:
'''simple docstring'''
a__ : List[str] = True
a__ : Optional[Any] = NezhaModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a__ : Any = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
a__ : Union[str, Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
a__ : Optional[int] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self : Dict , A__ : Dict , A__ : Optional[int] , A__ : Tuple , A__ : List[str] , A__ : Union[str, Any] , A__ : List[str] , A__ : Any ) -> Optional[int]:
'''simple docstring'''
a__ : Union[str, Any] = NezhaForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a__ : List[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Tuple , A__ : Tuple , A__ : Any , A__ : Any , A__ : int , A__ : str , A__ : Dict , A__ : int ) -> Any:
'''simple docstring'''
a__ : int = NezhaForNextSentencePrediction(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a__ : Any = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self : Any , A__ : Optional[Any] , A__ : List[str] , A__ : List[str] , A__ : Any , A__ : List[Any] , A__ : Optional[int] , A__ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
a__ : List[str] = NezhaForPreTraining(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a__ : str = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , next_sentence_label=__UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self : Union[str, Any] , A__ : Optional[int] , A__ : Tuple , A__ : Any , A__ : str , A__ : List[Any] , A__ : Dict , A__ : Tuple ) -> Any:
'''simple docstring'''
a__ : str = NezhaForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a__ : Optional[Any] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[Any] , A__ : int , A__ : List[str] , A__ : List[str] , A__ : int , A__ : Optional[int] , A__ : List[str] , A__ : List[str] ) -> Tuple:
'''simple docstring'''
a__ : Tuple = self.num_labels
a__ : Optional[int] = NezhaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a__ : Any = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : List[Any] , A__ : List[str] , A__ : Any , A__ : List[str] , A__ : int , A__ : List[Any] , A__ : Union[str, Any] , A__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
a__ : Dict = self.num_labels
a__ : Optional[int] = NezhaForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a__ : str = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Tuple , A__ : Optional[Any] , A__ : Any , A__ : Optional[Any] , A__ : Union[str, Any] , A__ : Union[str, Any] , A__ : Dict , A__ : str ) -> int:
'''simple docstring'''
a__ : List[str] = self.num_choices
a__ : Optional[Any] = NezhaForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
a__ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ : Any = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
a__ : Tuple = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) : Tuple = config_and_inputs
a__ : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = True
def __lowerCAmelCase ( self : Union[str, Any] , A__ : List[str] , A__ : List[Any] , A__ : List[Any]=False ) -> Dict:
'''simple docstring'''
a__ : Optional[int] = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
a__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCamelCase )
a__ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase )
return inputs_dict
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
a__ : List[str] = NezhaModelTester(self )
a__ : Optional[int] = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def __lowerCAmelCase ( self : Any ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
a__ : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) : int = self.model_tester.prepare_config_and_inputs_for_decoder()
a__ : int = None
self.model_tester.create_and_check_model_as_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def __lowerCAmelCase ( self : Dict ) -> str:
'''simple docstring'''
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def __lowerCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
a__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__UpperCamelCase )
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
'''simple docstring'''
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
a__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : List[Any] = NezhaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
@require_torch_gpu
def __lowerCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
a__ , a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
a__ : List[Any] = True
a__ : Optional[int] = model_class(config=__UpperCamelCase )
a__ : Tuple = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
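            # Trace the model with example CPU inputs, then round-trip the
            # TorchScript artifact through save/load to ensure it stays executable.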
a__ : Optional[int] = torch.jit.trace(
__UpperCamelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__UpperCamelCase , os.path.join(__UpperCamelCase , '''bert.pt''' ) )
a__ : Tuple = torch.jit.load(os.path.join(__UpperCamelCase , '''bert.pt''' ) , map_location=__UpperCamelCase )
loaded(inputs_dict['''input_ids'''].to(__UpperCamelCase ) , inputs_dict['''attention_mask'''].to(__UpperCamelCase ) )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
a__ : Union[str, Any] = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
a__ : Dict = torch.tensor([[0, 1, 2, 3, 4, 5]] )
a__ : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
a__ : Dict = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
a__ : Dict = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , __UpperCamelCase )
a__ : Dict = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self : str ) -> Any:
'''simple docstring'''
a__ : List[Any] = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
a__ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
a__ : Tuple = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
a__ : Union[str, Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
a__ : Optional[Any] = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape , __UpperCamelCase )
a__ : Optional[Any] = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1E-4 ) )
| 719 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
    'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
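
# Illustrative usage sketch (added; not part of the original module). With the
# defaults above, the seven conv strides multiply to a 320x downsampling of the
# raw waveform, which `inputs_to_logits_ratio` exposes:
#
#   config = Data2VecAudioConfig()
#   assert config.inputs_to_logits_ratio == 320  # 5 * 2**6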
| 340 | 0 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
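
# Illustrative extra check (added sketch): a triangle (odd cycle) is not
# bipartite, so check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]})
# returns False.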
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 526 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 526 | 1 |
"""simple docstring"""
def merge_sort(collection):
    """Repeatedly pull the current minimum and maximum out of the collection
    and splice them onto the front and back of the result."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
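
# Example (added sketch): merge_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5]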
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 721 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 554 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
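
# Example invocation (added sketch): run `accelerate test`, or point it at a
# saved config with `accelerate test --config_file path/to/default_config.yaml`.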
| 51 |
'''simple docstring'''
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
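
# Minimal demo (added sketch): the functions above assume node objects exposing
# `.val` and `.next`; `Node` below is a hypothetical helper, not part of the
# original module.
class Node:
    def __init__(self, val):
        self.val = val
        self.next = None


if __name__ == "__main__":
    head = Node("r")
    head.next = Node("a")
    head.next.next = Node("r")
    print(is_palindrome_stack(head))  # True
    print(is_palindrome_dict(head))  # True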
| 51 | 1 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR is true only when both inputs are 0."""
    return int(input_1 == input_2 == 0)
def main() -> None:
"""simple docstring"""
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F"| 0 | 0 | {nor_gate(0 , 0 )} |" )
print(F"| 0 | 1 | {nor_gate(0 , 1 )} |" )
print(F"| 1 | 0 | {nor_gate(1 , 0 )} |" )
print(F"| 1 | 1 | {nor_gate(1 , 1 )} |" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 715 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input already follows the {"image": ..., "question": ...} format
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(scores, ids)]
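
# Illustrative usage sketch (added; assumes this class is registered under the
# "visual-question-answering" task and that a fine-tuned VQA checkpoint such as
# "dandelin/vilt-b32-finetuned-vqa" is available):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering")
#   vqa(image="path/to/image.jpg", question="What is on the table?", top_k=2)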
| 499 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # Loading a feature extractor from a raw URL (legacy behavior)
        _ = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
    def test_push_to_hub(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"}
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 431 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
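
# Worked example (added sketch): a 100-character `content` field that maps to
# 25 token ids yields ratio_char_token = 100 / 25 = 4.0; larger ratios mean the
# tokenizer compresses the source text more effectively.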
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 483 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
| 291 |
import os
def solution():
    """Return the maximum top-to-bottom path sum in the triangle stored in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
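
# Worked mini example (added sketch): for the triangle
#   3
#   7 4
#   2 4 6
# the running maxima become [3], [10, 7], [12, 14, 13], so the answer is 14.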
| 291 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 66 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph; each possible edge is added independently with
    the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with `vertices_number` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
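
# Reproducible example (added sketch): seeding the `random` module makes the
# generator deterministic, which is convenient for tests:
#
#   random.seed(1)
#   random_graph(4, 0.5)   # same adjacency dict on every run
#   complete_graph(3)      # {0: [1, 2], 1: [0, 2], 2: [0, 1]}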
| 66 | 1 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
_lowercase : List[str] = F"""\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(_lowercase)  # `_lowercase` holds the model card text built above
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 707 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
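
# Usage sketch (added): given a directory of downloaded CI artifacts,
#   warnings = extract_warnings("artifacts", ["DeprecationWarning"])
# returns the deduplicated warning bodies whose type matches one of `targets`.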
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
        selected_warnings = extract_warnings(args.output_dir, args.targets)
        selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 600 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title='subcommands', dest='subcommand')

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, 'func'):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
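
# Example invocations (added sketch): `accelerate config` launches the
# interactive questionnaire, `accelerate config default` writes a default
# config file, and `accelerate config update` migrates an existing one.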
| 576 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits()
        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0
        decoded_processor_out = processor.batch_decode(
            logits , beam_width=beam_width , beam_prune_logp=beam_prune_logp , token_min_logp=token_min_logp , )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits )
        with get_context('fork' ).Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool , logits_list , beam_width=beam_width , beam_prune_logp=beam_prune_logp , token_min_logp=token_min_logp , )
        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(decoded_decoder , decoded_processor )
        self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , decoded_processor )
        self.assertTrue(np.array_equal(logit_scores , decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-20.054, -18.447] , logit_scores , atol=1E-3 ) )
        self.assertTrue(np.array_equal(lm_scores , decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-15.554, -13.9474] , lm_scores , atol=1E-3 ) )
    def test_decoder_with_params_of_lm( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits()
        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True
        decoded_processor_out = processor.batch_decode(
            logits , alpha=alpha , beta=beta , unk_score_offset=unk_score_offset , lm_score_boundary=lm_score_boundary , )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits )
        decoder.reset_params(
            alpha=alpha , beta=beta , unk_score_offset=unk_score_offset , lm_score_boundary=lm_score_boundary , )
        with get_context('fork' ).Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool , logits_list , )
        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(decoded_decoder , decoded_processor )
        self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , decoded_processor )
        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha , 2.0 )
        self.assertEqual(lm_model.beta , 5.0 )
        self.assertEqual(lm_model.unk_score_offset , -20.0 )
        self.assertEqual(lm_model.score_boundary , lm_score_boundary )
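    # Reading aid for the knobs exercised above (stated from pyctcdecode's general
    # semantics, not defined in this file): beam_width caps the hypotheses kept per
    # step, beam_prune_logp drops beams trailing the best one by more than that
    # log-prob, token_min_logp skips low-probability token candidates, and
    # alpha/beta weight the LM score and length bonus during rescoring.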
    def test_decoder_download_ignores_files( self ):
        processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
        downloaded_decoder_files = os.listdir(path_to_cached_dir )
        expected_decoder_files = ['alphabet.json', 'language_model']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files , expected_decoder_files )
    def test_decoder_local_files( self ):
        local_dir = snapshot_download('hf-internal-testing/processor_with_lm' )
        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir )
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
        local_decoder_files = os.listdir(local_dir )
        expected_decoder_files = os.listdir(path_to_cached_dir )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files , expected_decoder_files )
    def test_processor_from_auto_processor( self ):
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        processor_auto = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
        raw_speech = floats_list((3, 1_000) )
        input_wavaveca = processor_wavaveca(raw_speech , return_tensors='np' )
        input_auto = processor_auto(raw_speech , return_tensors='np' )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
        logits = self._get_dummy_logits()
        decoded_wavaveca = processor_wavaveca.batch_decode(logits )
        decoded_auto = processor_auto.batch_decode(logits )
        self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
    def test_model_input_names( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        self.assertListEqual(
            processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
    def get_from_offsets( offsets , key ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast( self ):
        processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits , output_word_offsets=True )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('text' in outputs )
        self.assertTrue('word_offsets' in outputs )
        self.assertTrue(isinstance(outputs , dict ) )  # the decode output is dict-like
        self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
    def test_offsets_integration_fast_batch( self ):
        processor = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits , output_word_offsets=True )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue('text' in outputs )
        self.assertTrue('word_offsets' in outputs )
        self.assertTrue(isinstance(outputs , dict ) )  # the decode output is dict-like
        self.assertListEqual(
            [' '.join(self.get_from_offsets(o , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
    def test_word_time_stamp_integration( self ):
        import torch
        ds = load_dataset('common_voice' , 'en' , split='train' , streaming=True )
        ds = ds.cast_column('audio' , datasets.Audio(sampling_rate=16_000 ) )
        ds_iter = iter(ds )
        sample = next(ds_iter )
        processor = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
        model = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
        with torch.no_grad():
            logits = model(input_values ).logits.cpu().numpy()
        output = processor.decode(logits[0] , output_word_offsets=True )
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                'start_time': d['start_offset'] * time_offset,
                'end_time': d['end_offset'] * time_offset,
                'word': d['word'],
            }
            for d in output['word_offsets']
        ]
        EXPECTED_TEXT = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
        # output words
        self.assertEqual(' '.join(self.get_from_offsets(word_time_stamps , 'word' ) ) , EXPECTED_TEXT )
        self.assertEqual(' '.join(self.get_from_offsets(word_time_stamps , 'word' ) ) , output.text )
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps , 'start_time' ) )
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps , 'end_time' ) )
        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(start_times , expected_start_tensor , atol=0.01 ) )
        self.assertTrue(torch.allclose(end_times , expected_end_tensor , atol=0.01 ) )
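    # Frame-to-seconds arithmetic used above: Wav2Vec2's inputs_to_logits_ratio of 320
    # over a 16 kHz sampling rate gives time_offset = 320 / 16000 = 0.02 s per logit
    # frame, so e.g. a start_offset of 71 maps to 1.42 s.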
| 576 | 1 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel( resistors :list[float] ) -> float:
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
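# Worked example for the parallel combination above (illustrative values):
# resistors of 4 and 6 ohms give R_eq = 1 / (1/4 + 1/6) = 2.4 ohms.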
def resistor_series( resistors :list[float] ) -> float:
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'Resistor at index {index} has a negative value!'
            raise ValueError(msg )
        index += 1
    return sum_r
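# Worked example for the series combination above: 4 and 6 ohms give
# R_eq = 4 + 6 = 10 ohms.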
if __name__ == "__main__":
import doctest
doctest.testmod()
| 197 | """simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1_0_2_4,
"""facebook/bart-large""": 1_0_2_4,
"""facebook/bart-large-mnli""": 1_0_2_4,
"""facebook/bart-large-cnn""": 1_0_2_4,
"""facebook/bart-large-xsum""": 1_0_2_4,
"""yjernite/bart_eli5""": 1_0_2_4,
}
class _A ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
    def mask_token( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self , value ):
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
| 197 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=snake_case ):
UpperCamelCase =["keras_nlp"]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]:
requires_backends(self , ['''keras_nlp'''] )
| 76 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
LETTER_FREQUENCY = {
"""E""": 1_2.7_0,
"""T""": 9.0_6,
"""A""": 8.1_7,
"""O""": 7.5_1,
"""I""": 6.9_7,
"""N""": 6.7_5,
"""S""": 6.3_3,
"""H""": 6.0_9,
"""R""": 5.9_9,
"""D""": 4.2_5,
"""L""": 4.0_3,
"""C""": 2.7_8,
"""U""": 2.7_6,
"""M""": 2.4_1,
"""W""": 2.3_6,
"""F""": 2.2_3,
"""G""": 2.0_2,
"""Y""": 1.9_7,
"""P""": 1.9_3,
"""B""": 1.2_9,
"""V""": 0.9_8,
"""K""": 0.7_7,
"""J""": 0.1_5,
"""X""": 0.1_5,
"""Q""": 0.1_0,
"""Z""": 0.0_7,
}
ETAOIN = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
LETTERS = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message ):
    """simple docstring"""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x ):
    """simple docstring"""
    return x[0]
def get_frequency_order(message ):
    """simple docstring"""
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score(message ):
    """simple docstring"""
    freq_order = get_frequency_order(message )
    match_score = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
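# Scoring sketch: the score above counts how many of ETAOIN's six most and six
# least frequent letters also sit at the corresponding extremes of the message's
# frequency order, yielding an integer from 0 to 12 that can rank candidate
# decryptions (higher means more English-like).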
if __name__ == "__main__":
import doctest
doctest.testmod()
| 501 | 0 |
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __lowerCamelCase ( ProcessorMixin ):
    attributes = ["""image_processor"""]
    image_processor_class = """SamImageProcessor"""
    def __init__( self , image_processor ) -> Any:
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size['longest_edge']
    def __call__( self , images=None , segmentation_maps=None , input_points=None , input_labels=None , input_boxes=None , return_tensors=None , **kwargs , ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , **kwargs , )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor['original_sizes']
        if hasattr(original_sizes , 'numpy' ): # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points , input_labels , input_boxes = self._check_and_preprocess_points(
            input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor , original_sizes , input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , return_tensors=return_tensors , )
        return encoding_image_processor
    def _normalize_and_convert( self , encoding_image_processor , original_sizes , input_points=None , input_labels=None , input_boxes=None , return_tensors="pt" , ) -> Tuple:
        if input_points is not None:
            if len(original_sizes ) != len(input_points ):
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_sizes[0] ) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_size )
                    for point, original_size in zip(input_points , original_sizes )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    input_points , input_labels = self._pad_points_and_labels(input_points , input_labels )
            input_points = np.array(input_points )
        if input_labels is not None:
            input_labels = np.array(input_labels )
        if input_boxes is not None:
            if len(original_sizes ) != len(input_boxes ):
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_sizes[0] , is_bounding_box=True )
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_size , is_bounding_box=True )
                    for box, original_size in zip(input_boxes , original_sizes )
                ]
            input_boxes = np.array(input_boxes )
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({'input_boxes': input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points )
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points )
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({'input_points': input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels )
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels )
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({'input_labels': input_labels} )
        return encoding_image_processor
    def _pad_points_and_labels( self , input_points , input_labels ) -> List[str]:
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                input_labels[i] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels
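    # Padding sketch: if one prompt carries 3 points and another only 1, the shorter
    # one is extended to 3 with sentinel rows of (point_pad_value, point_pad_value),
    # i.e. (-10, -10), and its label list gets -10 appended, so every prompt in the
    # batch ends up with the same point count and padded slots stay identifiable.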
    def _normalize_coordinates( self , target_size , coords , original_size , is_bounding_box=False ) -> np.ndarray:
        old_h , old_w = original_size
        new_h , new_w = self.image_processor._get_preprocess_shape(original_size , longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )
        if is_bounding_box:
            coords = coords.reshape(-1 , 2 , 2 )
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1 , 4 )
        return coords
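    # Worked example (hypothetical numbers): for original_size=(480, 640) and a
    # target longest edge of 1024, _get_preprocess_shape yields (768, 1024), a
    # uniform scale of 1.6, so the point (320, 240) maps to (512.0, 384.0).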
    def _check_and_preprocess_points( self , input_points=None , input_labels=None , input_boxes=None , ) -> Union[str, Any]:
        if input_points is not None:
            if hasattr(input_points , 'numpy' ): # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points , list ) or not isinstance(input_points[0] , list ):
                raise ValueError('Input points must be a list of list of floating points.' )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels , 'numpy' ):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels , list ) or not isinstance(input_labels[0] , list ):
                raise ValueError('Input labels must be a list of list integers.' )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes , 'numpy' ):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes , list )
                or not isinstance(input_boxes[0] , list )
                or not isinstance(input_boxes[0][0] , list )
            ):
                raise ValueError('Input boxes must be a list of list of list of floating points.' )
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
@property
    def model_input_names( self ):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )
    def post_process_masks( self , *args , **kwargs ):
        return self.image_processor.post_process_masks(*args , **kwargs )
| 20 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit = 1_00_00_00 , n_limit = 10 ) -> int:
    """simple docstring"""
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
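# Counting sketch (Project Euler 174 laminae): an 8x8 outer square with a centred
# 4x4 hole uses 8*8 - 4*4 = 48 tiles, so count[48] gains one arrangement; the
# answer is how many tile totals t <= t_limit admit between 1 and n_limit
# distinct arrangements.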
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 | 1 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester( ConfigTester ):
    def create_and_test_config_common_properties( self) -> Any:
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config , 'width_multiplier'))
class MobileViTVaModelTester:
    def __init__( self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.0_2, use_labels=True, is_training=True, num_labels=10, scope=None, width_multiplier=0.2_5, ffn_dropout=0.0, attn_dropout=0.0, ) -> str:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(5_12 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout
    def prepare_config_and_inputs( self) -> List[Any]:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self) -> Any:
        """simple docstring"""
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
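    # make_divisible (used for last_hidden_size above) rounds a channel count to a
    # multiple of `divisor`; here make_divisible(512 * 0.25, divisor=8) is simply
    # 128, since 128 is already a multiple of 8.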
    def create_and_check_model( self, config, pixel_values, labels, pixel_labels) -> Optional[int]:
        """simple docstring"""
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def create_and_check_for_image_classification( self, config, pixel_values, labels, pixel_labels) -> Tuple:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation( self, config, pixel_values, labels, pixel_labels) -> int:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
    def prepare_config_and_inputs_for_common( self) -> Dict:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self) -> Tuple:
        """simple docstring"""
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config( self) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
def UpperCamelCase ( self) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions')
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCamelCase ( self) -> int:
"""simple docstring"""
pass
    def test_forward_signature( self) -> Dict:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model( self) -> Optional[int]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output( self) -> int:
        """simple docstring"""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
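    # Size check illustrated: with the tester's image_size=64 the five hidden maps
    # are 32x32, 16x16, 8x8, 4x4 and 2x2, so `divisor` finishes at 64 and the
    # asserted output_stride equals 64 // 2 = 32.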
    def test_for_image_classification( self) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation( self) -> Dict:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self) -> Any:
        """simple docstring"""
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@cached_property
    def default_image_processor( self) -> Union[str, Any]:
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head( self) -> Optional[int]:
        """simple docstring"""
        model = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
    @slow
    def test_inference_semantic_segmentation( self) -> List[Any]:
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
                [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
                [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))
    @slow
    def test_post_processing_semantic_segmentation( self) -> List[str]:
        """simple docstring"""
        model = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 89 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp( self ) -> Tuple:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
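        # Merges-file format note: after the '#version' header each line is one
        # ranked merge rule, e.g. '\u0120 l' merges the byte-level tokens 'Ġ' and 'l'
        # into 'Ġl'; earlier lines outrank later ones.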
    def get_tokenizer( self , **kwargs ) -> Optional[Any]:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> str:
        """simple docstring"""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ) -> str:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_token_type_ids( self ) -> str:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''' , '''World''' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , expected_token_type_ids )
@slow
    def test_sequence_builders( self ) -> int:
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration( self ) -> int:
        """simple docstring"""
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            sequences = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding['''input_ids''']]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequences , decoded_sequences ):
                self.assertEqual(expected , decoded )
| 634 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ):
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
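# Byte-mapping example: byte 0x20 (space) is not in the printable set, so it is
# remapped to chr(256 + 32) = '\u0120' ('Ġ'); that is why GPT-2-style merges files
# show leading-space tokens as 'Ġlow' rather than ' low'.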
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
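# Example: get_pairs(('l', 'o', 'w')) returns {('l', 'o'), ('o', 'w')} — the set of
# adjacent symbol pairs that the BPE loop below scores against self.bpe_ranks.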
class UpperCAmelCase__ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ) -> str:
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab( self ):
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
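    # Illustrative merge walk (hypothetical ranks): starting from ('l','o','w','e','r'),
    # if ('e','r') is the best-ranked pair it merges first, giving ('l','o','w','er');
    # merging continues until no remaining pair appears in bpe_ranks, and the surviving
    # subwords are joined with spaces (e.g. 'low er') and cached.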
    def _tokenize( self , text ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=already_has_special_tokens )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
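    # Per the formula above, a single 3-token sequence yields the mask
    # [1, 0, 0, 0, 1]: the flanking 1s mark the added special-token positions.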
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        '''simple docstring'''
        return token_ids_a + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation : "Conversation" ) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = ' '.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
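# Illustrative only — a minimal sketch of the special-tokens mask above, assuming
# this class is exposed as a Blenderbot-style byte-level BPE tokenizer (the class
# name is not visible in this fragment):
#
#   mask = tokenizer.get_special_tokens_mask([5, 6, 7])
#   # -> [1, 0, 0, 0, 1]   one leading BOS slot and one trailing EOS slot are
#   # flagged as special (1); the sequence tokens themselves are 0.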
| 702 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )

    def get_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def get_qformer_tokenizer( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
        self.assertIsInstance(processor.qformer_tokenizer , PreTrainedTokenizerFast )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tokens = tokenizer(input_str , return_token_type_ids=False )
        encoded_tokens_qformer = qformer_tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
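# Hypothetical usage sketch of the processor exercised by the tests above
# (the checkpoint name is an assumption, not pinned by this file):
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   out = processor(text="lower newer", images=image, return_tensors="pt")
#   sorted(out.keys())
#   # ['attention_mask', 'input_ids', 'pixel_values',
#   #  'qformer_attention_mask', 'qformer_input_ids']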
| 109 | 0 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate( self , eval_dataset : Optional[Dataset] = None , eval_examples=None , ignore_keys : Optional[List[str]] = None , metric_key_prefix : str = "eval" , **gen_kwargs , ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs['max_length'] = (
            gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
        )
        gen_kwargs['num_beams'] = (
            gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'{metric_key_prefix}_' ):
                    metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix : str = "test" , **gen_kwargs ):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output , 'predict' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'{metric_key_prefix}_' ):
                metrics[F'{metric_key_prefix}_{key}'] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
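# Minimal usage sketch (all argument values below are placeholders, not from this file):
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model, args=training_args,
#       train_dataset=train_ds, eval_dataset=eval_ds, eval_examples=eval_examples,
#       post_process_function=post_processing_function, compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=30, num_beams=4, metric_key_prefix="eval")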
| 472 |
from importlib import import_module
from .logging import get_logger
__snake_case = get_logger(__name__)
class _PatchedModuleObj:
    def __init__( self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__' ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule:
    _active_patches = []

    def __init__( self , obj , target : str , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.' )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ):
        *submodules , target_attr = self.target.split('.' )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module('.'.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' )
    def __exit__( self , *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start( self ):
        self.__enter__()
        self._active_patches.append(self )

    def stop( self ):
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
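# A minimal usage sketch of the patcher above. `_test_patching` and `mock_join`
# are hypothetical stand-ins for a module under test and a replacement callable:
#
#   import _test_patching
#
#   def mock_join(*parts):
#       return "/".join(parts)
#
#   with patch_submodule(_test_patching, "os.path.join", mock_join):
#       assert _test_patching.os.path.join("a", "b") == "a/b"
#   # on __exit__, the originals stored in `self.original` are restored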
| 472 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]
        def __init__( self , data_dir : str , tokenizer : PreTrainedTokenizer , task : str , max_seq_length : Optional[int] = None , overwrite_cache=False , evaluate : bool = False , ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir , """cached_{}_{}_{}_{}""".format(
                    """dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(F'''Loading features from cached file {cached_features_file}''' )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(F'''Creating features from dataset file at {data_dir}''' )
                    examples = (
                        processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                    )
                    logger.info("""Training examples: %s""" , len(examples ) )
                    self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                    logger.info("""Saving features into cached file %s""" , cached_features_file )
                    torch.save(self.features , cached_features_file )
        def __len__( self ):
            return len(self.features )

        def __getitem__( self , i ):
            return self.features[i]

        def get_labels( self ):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]
        def __init__( self , data_dir : str , tokenizer : PreTrainedTokenizer , task : str , max_seq_length : Optional[int] = 128 , overwrite_cache=False , evaluate : bool = False , ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
            self.features = hans_convert_examples_to_features(examples , self.label_list , max_seq_length , tokenizer )

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
                    if ex_index % 1_0_0_0_0 == 0:
                        logger.info("""Writing example %d of %d""" % (ex_index, len(examples )) )
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen , (
                    {
                        """example_id""": tf.int32,
                        """input_ids""": tf.int32,
                        """attention_mask""": tf.int32,
                        """token_type_ids""": tf.int32,
                    },
                    tf.int64,
                ) , (
                    {
                        """example_id""": tf.TensorShape([] ),
                        """input_ids""": tf.TensorShape([None, None] ),
                        """attention_mask""": tf.TensorShape([None, None] ),
                        """token_type_ids""": tf.TensorShape([None, None] ),
                    },
                    tf.TensorShape([] ),
                ) , )
        def get_dataset( self ):
            return self.dataset

        def __len__( self ):
            return len(self.features )

        def __getitem__( self , i ):
            return self.features[i]

        def get_labels( self ):
            return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , """heuristics_train_set.txt""" ) ) , """train""" )

    def get_dev_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , """heuristics_evaluation_set.txt""" ) ) , """dev""" )

    def get_labels( self ):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples( self , lines , set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ):
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="""convert examples to features""" ):
        if ex_index % 1_0_0_0_0 == 0:
            logger.info("""Writing example %d""" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="""max_length""" , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info("""*** Example ***""" )
        logger.info(F'''guid: {example}''' )
        logger.info(F'''features: {features[i]}''' )
    return features
hans_tasks_num_labels = {
    '''hans''': 3,
}

hans_processors = {
    '''hans''': HansProcessor,
}
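# Sketch of how the pieces above fit together (paths and the checkpoint name are
# placeholders; AutoTokenizer is assumed to be importable from transformers):
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = HansDataset("path/to/hans", tokenizer, task="hans",
#                         max_seq_length=128, evaluate=True)
#   dataset.get_labels()  # -> ["contradiction", "entailment", "neutral"]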
| 704 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''openbmb/cpm-ant-10b''': 1_0_2_4,
}
def load_vocab( vocab_file ) -> Dict:
    vocab = collections.OrderedDict()
    with open(vocab_file , """r""" , encoding="""utf-8""" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("""\n""" )
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=2_0_0 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize( self , token ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = """""".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
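# Worked example of the greedy longest-match-first loop above with a toy vocab:
#
#   tok = WordpieceTokenizer(vocab={"ab", "c", "abc", "d"}, unk_token="<unk>")
#   tok.tokenize("abcd")  # -> ["abc", "d"]    the longest prefix "abc" wins over "ab"
#   tok.tokenize("abx")   # -> ["ab", "<unk>"] "x" is not in the vocab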
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    add_prefix_space = False

    def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        requires_backends(self , ["""jieba"""] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id( self ):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id( self ):
        return self.encoder[self.eod_token]

    @property
    def newline_id( self ):
        return self.encoder["\n"]

    @property
    def vocab_size( self ):
        return len(self.encoder )

    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens

    def _decode( self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )

    def check( self , token ):
        return token in self.encoder

    def convert_tokens_to_string( self , tokens ):
        return "".join(tokens )

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[""" """]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["""\n"""]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(token + """\n""" )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : List[int] = None ):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
| 103 | 0 |
import argparse
CUSTOM_JS_FILE = '''docs/source/_static/js/custom.js'''


def update_custom_js(version):
    """Update the stable version and the version table in custom.js."""
    with open(CUSTOM_JS_FILE, encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('''const stableVersion =''' ):
        index += 1
    lines[index] = f'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith('''const versionMapping = {''' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('''}''' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'''    "v{version}": "v{version}",\n'''
    with open(CUSTOM_JS_FILE, '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
lowerCAmelCase_ = parser.parse_args()
update_custom_js(args.version)
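# For reference, the custom.js fragment this script rewrites is expected to look
# roughly like the following (shape inferred from the parsing logic above, not
# from the actual file):
#
#   const stableVersion = "v4.30.0"
#   const versionMapping = {
#       "": "v4.30.0 (stable)",
#       "v4.29.0": "v4.29.0",
#   }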
| 60 |
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
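# Quick sanity check of the segmented sieve above:
#
#   sieve(30)  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
#
# Each segment is at most sqrt(n) wide, so peak memory stays O(sqrt(n)) instead of O(n).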
| 342 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ImageGPTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size( features : Features ) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature : FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
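# Worked example (the exact config values are an assumption about the library
# defaults, not pinned by this file):
#
#   features = Features({"image": Image(), "caption": Value("string")})
#   get_writer_batch_size(features)
#   # -> config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS (e.g. 100)
#   get_writer_batch_size(Features({"caption": Value("string")}))  # -> None (no cap)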
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ) -> None:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )

    def read( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    def __init__( self , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , **parquet_writer_kwargs , ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write( self ) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , '''wb+''' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written

    def _write( self , file_obj : BinaryIO , batch_size : int , **parquet_writer_kwargs ) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop('''path_or_buf''' , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
return written | 396 | 1 |
"""simple docstring"""
def sum_of_proper_divisors(input_num: int) -> int:
    '''simple docstring'''
    if not isinstance(input_num , int ):
        raise ValueError("""Input must be an integer""" )
    if input_num <= 0:
        raise ValueError("""Input must be positive""" )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
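# Worked example: 28 is a perfect number, since its proper divisors sum back to itself.
#
#   sum_of_proper_divisors(28)  # -> 1 + 2 + 4 + 7 + 14 = 28
#   sum_of_proper_divisors(12)  # -> 1 + 2 + 3 + 4 + 6 = 16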
| 567 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to(\"cuda\")\n\n        >>> prompt = \"A red cartoon frog, 4k\"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to(\"cuda\")\n\n        >>> init_image = load_image(\n        ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n        ...     \"/kandinsky/frog.png\"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save(\"red_frog.png\")\n        ```\n"
def downscale_height_and_width(height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
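# Worked example: with the MoVQ scale factor of 8, a 768x768 request maps to a
# 96x96 latent grid, and non-multiples of 64 are rounded up to the next valid size:
#
#   downscale_height_and_width(768, 768, 8)  # -> (96, 96)
#   downscale_height_and_width(700, 700, 8)  # -> (88, 88)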
def prepare_image(pil_image , w=512 , h=512 ):
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert("""RGB""" ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
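# Shape check for the helper above: the result is a single NCHW tensor in [-1, 1].
#
#   img = Image.new("RGB", (1024, 640))
#   prepare_image(img, w=512, h=512).shape  # -> torch.Size([1, 3, 512, 512])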
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__( self , unet : UNet2DConditionModel , scheduler : DDPMScheduler , movq : VQModel , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps( self , num_inference_steps , strength , device ):
        init_timestep = min(int(num_inference_steps * strength) , num_inference_steps)
        t_start = max(num_inference_steps - init_timestep , 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ):
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}''')
        image = image.to(device=device , dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator , list) and len(generator) != batch_size:
                raise ValueError(
                    F'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                    F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
            elif isinstance(generator , list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents , dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents] , dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""")
        device = torch.device(F'''cuda:{gpu_id}''')
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device)
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0"""):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""")
        device = torch.device(F'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True)
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if not hasattr(self.unet , """_hf_hook"""):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , """_hf_hook""")
                and hasattr(module._hf_hook , """execution_device""")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__( self , image_embeds : Union[torch.FloatTensor, List[torch.FloatTensor]] , image : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , negative_image_embeds : Union[torch.FloatTensor, List[torch.FloatTensor]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 100 , guidance_scale : float = 4.0 , strength : float = 0.3 , num_images_per_prompt : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list):
            image_embeds = torch.cat(image_embeds , dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds , list):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=device)
        if not isinstance(image , list):
            image = [image]
        if not all(isinstance(i , (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                F'''Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor''')
        image = torch.cat([prepare_image(i , width , height) for i in image] , dim=0)
        image = image.to(dtype=image_embeds.dtype , device=device)
        latents = self.movq.encode(image)["""latents"""]
        latents = latents.repeat_interleave(num_images_per_prompt , dim=0)
        self.scheduler.set_timesteps(num_inference_steps , device=device)
        timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , strength , device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor)
        latents = self.prepare_latents(
            latents , latent_timestep , batch_size , num_images_per_prompt , image_embeds.dtype , device , generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1)
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2)
                _ , variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1)
            if not (
                hasattr(self.scheduler.config , """variance_type""")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True)["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1)
            image = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 567 | 1 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 50_00) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
if __name__ == "__main__":
print(F'{solution() = }')
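# Worked example for the predicate above: 22 is the 4th pentagonal number,
# since P(4) = 4 * (3 * 4 - 1) / 2 = 22.
#
#   is_pentagonal(22)  # -> True   sqrt(1 + 24*22) = 23, and (1 + 23) / 6 = 4
#   is_pentagonal(23)  # -> False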
| 705 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("""T""")
class GraphAdjacencyList(Generic[T]):
    '''simple docstring'''

    def __init__( self , directed : bool = True ):
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge( self , source_vertex : T , destination_vertex : T ):
        if not self.directed: # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else: # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self ):
"""simple docstring"""
return pformat(self.adj_list )
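# Quick usage sketch of the adjacency-list graph above:
#
#   g = GraphAdjacencyList[int]()            # directed by default
#   g.add_edge(0, 1).add_edge(0, 2).add_edge(1, 2)
#   print(g)   # {0: [1, 2], 1: [2], 2: []}
#
#   ug = GraphAdjacencyList[int](directed=False)
#   ug.add_edge(0, 1)
#   print(ug)  # {0: [1], 1: [0]}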
| 207 | 0 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 100_0000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'{solution() = }')
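# Worked example: 111111 = 7 * 15873, and no shorter repunit is divisible by 7,
# so the least divisible repunit for 7 has 6 digits.
#
#   least_divisible_repunit(7)   # -> 6
#   least_divisible_repunit(41)  # -> 5   (11111 = 41 * 271)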
| 50 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 1 |
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=""" """ )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 12 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config( self , **kwargs ):
        config = {
            """num_train_timesteps""": 1_000,
            """beta_start""": 0.0_001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ):
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5

    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.9_606 ) < 1E-2
        assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""" )
SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Dict = len(_a )
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ : int = model(_a , _a )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
SCREAMING_SNAKE_CASE__ : Tuple = pred_prev_sample
SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(_a ) )
SCREAMING_SNAKE_CASE__ : int = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler.timesteps
for i, timestep in enumerate(_a ):
if i == len(_a ) - 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = -1
else:
SCREAMING_SNAKE_CASE__ : Tuple = timesteps[i + 1]
SCREAMING_SNAKE_CASE__ : int = scheduler.previous_timestep(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prev_t.item()
self.assertEqual(_a , _a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_a , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : int = [100, 87, 50, 1, 0]
SCREAMING_SNAKE_CASE__ : List[str] = len(_a )
with self.assertRaises(_a , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _a , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_a )
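# Minimal inference sketch with this scheduler (``model`` stands in for a
# trained UNet and the shapes are illustrative, hence left as comments):
#
# scheduler = DDPMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
# scheduler.set_timesteps(num_inference_steps=50)
# sample = torch.randn(1, 3, 32, 32)
# for t in scheduler.timesteps:
#     noise_pred = model(sample, t).sample
#     sample = scheduler.step(noise_pred, t, sample).prev_sample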
| 12 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ):
"""simple docstring"""
_snake_case = False
super().__init__(__lowerCamelCase , __lowerCamelCase )
_snake_case = self.image_processor
def __call__( self : Optional[int] , __lowerCamelCase : ImageInput = None , __lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[bool, str, PaddingStrategy] = False , __lowerCamelCase : Union[bool, str, TruncationStrategy] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[str, TensorType]] = None , **__lowerCamelCase : List[str] , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
_snake_case = self.tokenizer
_snake_case = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
return text_encoding
# add pixel_values
_snake_case = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
if text is not None:
_snake_case = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
else:
_snake_case = None
if text_encoding is not None:
encoding_image_processor.update(__lowerCamelCase )
return encoding_image_processor
def __UpperCAmelCase ( self : Dict , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Any ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
_snake_case = self.tokenizer.model_input_names
_snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
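# Usage sketch (the checkpoint name is an assumption; any BLIP checkpoint that
# ships this processor class should behave the same way):
#
# from PIL import Image
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")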
| 103 |
"""simple docstring"""
from copy import deepcopy
class UpperCAmelCase :
def __init__( self : Optional[Any] , __lowerCamelCase : list[int] | None = None , __lowerCamelCase : int | None = None ):
"""simple docstring"""
if arr is None and size is not None:
_snake_case = size
_snake_case = [0] * size
elif arr is not None:
self.init(__lowerCamelCase )
else:
raise ValueError('''Either arr or size must be specified''' )
def __UpperCAmelCase ( self : Any , __lowerCamelCase : list[int] ):
"""simple docstring"""
_snake_case = len(__lowerCamelCase )
_snake_case = deepcopy(__lowerCamelCase )
for i in range(1 , self.size ):
_snake_case = self.next_(__lowerCamelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
_snake_case = self.next_(__lowerCamelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : int ):
"""simple docstring"""
return index - (index & (-index))
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
_snake_case = self.next_(__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
self.add(__lowerCamelCase , value - self.get(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ):
"""simple docstring"""
if right == 0:
return 0
_snake_case = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
_snake_case = self.prev(__lowerCamelCase )
return result
def __UpperCAmelCase ( self : str , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
return self.prefix(__lowerCamelCase ) - self.prefix(__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int ):
"""simple docstring"""
return self.query(__lowerCamelCase , index + 1 )
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : int ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
_snake_case = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
_snake_case = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
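# Small self-check for FenwickTree (values verified by hand; runs on import):
_demo = FenwickTree(arr=[1, 2, 3, 4, 5])
assert _demo.prefix(5) == 15  # 1 + 2 + 3 + 4 + 5
assert _demo.query(1, 4) == 9  # 2 + 3 + 4
_demo.update(2, 13)  # arr[2]: 3 -> 13
assert _demo.prefix(5) == 25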
if __name__ == "__main__":
import doctest
doctest.testmod()
| 103 | 1 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        '''simple docstring'''
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(',') if label.strip()]
        return labels
def __call__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
'''simple docstring'''
if len(__UpperCamelCase ) == 0 or len(__UpperCamelCase ) == 0:
raise ValueError('You must include at least one label and at least one sequence.' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(__UpperCamelCase ) )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case__ : Optional[int] = [sequences]
snake_case__ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__UpperCamelCase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
def __init__( self , __UpperCamelCase=ZeroShotClassificationArgumentHandler() , *__UpperCamelCase , **__UpperCamelCase ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = args_parser
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
    def entailment_id(self) -> int:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('entail' ):
return ind
return -1
def __a ( self , __UpperCamelCase , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=TruncationStrategy.ONLY_FIRST , **__UpperCamelCase ) -> Dict:
'''simple docstring'''
snake_case__ : int = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
' `pad_token=eos_token`' )
snake_case__ : Optional[Any] = self.tokenizer.eos_token
try:
snake_case__ : List[Any] = self.tokenizer(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , )
except Exception as e:
if "too short" in str(__UpperCamelCase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
snake_case__ : int = self.tokenizer(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_tensors=__UpperCamelCase , padding=__UpperCamelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def __a ( self , **__UpperCamelCase ) -> Any:
'''simple docstring'''
if kwargs.get('multi_class' , __UpperCamelCase ) is not None:
snake_case__ : int = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.' )
snake_case__ : Any = {}
if "candidate_labels" in kwargs:
snake_case__ : Any = self._args_parser._parse_labels(kwargs['candidate_labels'] )
if "hypothesis_template" in kwargs:
snake_case__ : Tuple = kwargs['hypothesis_template']
snake_case__ : Union[str, Any] = {}
if "multi_label" in kwargs:
snake_case__ : Union[str, Any] = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self , __UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase , ) -> Union[str, Any]:
'''simple docstring'''
if len(__UpperCamelCase ) == 0:
pass
elif len(__UpperCamelCase ) == 1 and "candidate_labels" not in kwargs:
snake_case__ : Tuple = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(__UpperCamelCase , **__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="This example is {}." ) -> int:
'''simple docstring'''
snake_case__ , snake_case__ : List[Any] = self._args_parser(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
snake_case__ : List[str] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__UpperCamelCase ) - 1,
**model_input,
}
def __a ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = inputs['candidate_label']
snake_case__ : Optional[Any] = inputs['sequence']
snake_case__ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
snake_case__ : Tuple = self.model(**__UpperCamelCase )
snake_case__ : Dict = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def __a ( self , __UpperCamelCase , __UpperCamelCase=False ) -> Any:
'''simple docstring'''
snake_case__ : Union[str, Any] = [outputs['candidate_label'] for outputs in model_outputs]
snake_case__ : Tuple = [outputs['sequence'] for outputs in model_outputs]
snake_case__ : Dict = np.concatenate([output['logits'].numpy() for output in model_outputs] )
snake_case__ : Any = logits.shape[0]
snake_case__ : str = len(__UpperCamelCase )
snake_case__ : List[str] = N // n
snake_case__ : Dict = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__UpperCamelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
snake_case__ : Optional[int] = self.entailment_id
snake_case__ : Optional[int] = -1 if entailment_id == 0 else 0
snake_case__ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
snake_case__ : Optional[int] = np.exp(__UpperCamelCase ) / np.exp(__UpperCamelCase ).sum(-1 , keepdims=__UpperCamelCase )
snake_case__ : str = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
snake_case__ : Optional[Any] = reshaped_outputs[..., self.entailment_id]
snake_case__ : List[Any] = np.exp(__UpperCamelCase ) / np.exp(__UpperCamelCase ).sum(-1 , keepdims=__UpperCamelCase )
snake_case__ : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
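# Typical entry point for this pipeline (the model name is an assumption; any
# NLI checkpoint whose config maps an "entailment" label works):
#
# from transformers import pipeline
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# classifier("I love this movie", candidate_labels=["positive", "negative"])
# # -> {"sequence": ..., "labels": [...], "scores": [...]}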
| 699 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
    '''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"
def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=4.0 , __UpperCamelCase=[2, 2, 6, 2] , __UpperCamelCase=[64, 128, 320, 512] , __UpperCamelCase=[7, 3, 3, 3] , __UpperCamelCase=[4, 2, 2, 2] , __UpperCamelCase=[2, 1, 1, 1] , __UpperCamelCase=4 , __UpperCamelCase=0.0 , __UpperCamelCase="gelu" , __UpperCamelCase=True , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0_2 , **__UpperCamelCase , ) -> Any:
'''simple docstring'''
snake_case__ : List[str] = num_channels
snake_case__ : Dict = patch_size
snake_case__ : Optional[int] = stride
snake_case__ : str = padding
snake_case__ : List[str] = pool_size
snake_case__ : List[Any] = hidden_sizes
snake_case__ : List[Any] = mlp_ratio
snake_case__ : Union[str, Any] = depths
snake_case__ : Dict = patch_sizes
snake_case__ : Dict = strides
snake_case__ : Dict = num_encoder_blocks
snake_case__ : Union[str, Any] = drop_path_rate
snake_case__ : List[str] = hidden_act
snake_case__ : Optional[Any] = use_layer_scale
snake_case__ : int = layer_scale_init_value
snake_case__ : Dict = initializer_range
super().__init__(**__UpperCamelCase )
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __a ( self ) -> float:
'''simple docstring'''
return 2E-3
| 699 | 1 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r'''#.*''' , '''''' , line )  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = '''\n'''.join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''')
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 560 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
@property
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[int] = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' ,subfolder='''test_unet''' ,)
return unet
@property
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Any = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' ,subfolder='''test_unet_class_cond''' ,)
return unet
def UpperCAmelCase ( self : Optional[int] ,_snake_case : int=False ) -> Dict:
"""simple docstring"""
if class_cond:
lowercase__ : Optional[int] = self.dummy_cond_unet
else:
lowercase__ : List[Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
lowercase__ : Tuple = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
lowercase__ : Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Tuple ,_snake_case : List[Any]=0 ) -> str:
"""simple docstring"""
if str(_snake_case ).startswith('''mps''' ):
lowercase__ : Dict = torch.manual_seed(_snake_case )
else:
lowercase__ : int = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : List[str] = {
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ : Union[str, Any] = self.get_dummy_components()
lowercase__ : Dict = ConsistencyModelPipeline(**_snake_case )
lowercase__ : List[str] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : List[str] = self.get_dummy_inputs(_snake_case )
lowercase__ : Union[str, Any] = pipe(**_snake_case ).images
assert image.shape == (1, 32, 32, 3)
lowercase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowercase__ : Dict = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
lowercase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ : int = self.get_dummy_components(class_cond=_snake_case )
lowercase__ : List[Any] = ConsistencyModelPipeline(**_snake_case )
lowercase__ : Dict = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Optional[Any] = self.get_dummy_inputs(_snake_case )
lowercase__ : Dict = 0
lowercase__ : Union[str, Any] = pipe(**_snake_case ).images
assert image.shape == (1, 32, 32, 3)
lowercase__ : List[Any] = image[0, -3:, -3:, -1]
lowercase__ : Union[str, Any] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ : str = self.get_dummy_components()
lowercase__ : int = ConsistencyModelPipeline(**_snake_case )
lowercase__ : List[str] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
lowercase__ : Union[str, Any] = 1
lowercase__ : List[Any] = None
lowercase__ : Any = pipe(**_snake_case ).images
assert image.shape == (1, 32, 32, 3)
lowercase__ : Dict = image[0, -3:, -3:, -1]
lowercase__ : List[str] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
lowercase__ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ : Any = self.get_dummy_components(class_cond=_snake_case )
lowercase__ : int = ConsistencyModelPipeline(**_snake_case )
lowercase__ : Dict = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : List[str] = self.get_dummy_inputs(_snake_case )
lowercase__ : Union[str, Any] = 1
lowercase__ : Any = None
lowercase__ : Union[str, Any] = 0
lowercase__ : Optional[Any] = pipe(**_snake_case ).images
assert image.shape == (1, 32, 32, 3)
lowercase__ : List[Any] = image[0, -3:, -3:, -1]
lowercase__ : Optional[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Any=0 ,_snake_case : Optional[int]=False ,_snake_case : List[Any]="cpu" ,_snake_case : Tuple=torch.floataa ,_snake_case : List[Any]=(1, 3, 64, 64) ) -> str:
"""simple docstring"""
lowercase__ : Optional[int] = torch.manual_seed(_snake_case )
lowercase__ : Optional[int] = {
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''class_labels''': 0,
'''generator''': generator,
'''output_type''': '''np''',
}
if get_fixed_latents:
lowercase__ : str = self.get_fixed_latents(seed=_snake_case ,device=_snake_case ,dtype=_snake_case ,shape=_snake_case )
lowercase__ : Tuple = latents
return inputs
def UpperCAmelCase ( self : List[Any] ,_snake_case : int=0 ,_snake_case : Any="cpu" ,_snake_case : Optional[Any]=torch.floataa ,_snake_case : Tuple=(1, 3, 64, 64) ) -> Any:
"""simple docstring"""
if type(_snake_case ) == str:
lowercase__ : List[str] = torch.device(_snake_case )
lowercase__ : Tuple = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : Tuple = randn_tensor(_snake_case ,generator=_snake_case ,device=_snake_case ,dtype=_snake_case )
return latents
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Union[str, Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
lowercase__ : Optional[int] = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
lowercase__ : Optional[int] = ConsistencyModelPipeline(unet=_snake_case ,scheduler=_snake_case )
pipe.to(torch_device=_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Union[str, Any] = self.get_inputs()
lowercase__ : Union[str, Any] = pipe(**_snake_case ).images
assert image.shape == (1, 64, 64, 3)
lowercase__ : int = image[0, -3:, -3:, -1]
lowercase__ : Dict = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase__ : str = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
lowercase__ : str = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
lowercase__ : List[Any] = ConsistencyModelPipeline(unet=_snake_case ,scheduler=_snake_case )
pipe.to(torch_device=_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : int = self.get_inputs()
lowercase__ : Optional[int] = 1
lowercase__ : Union[str, Any] = None
lowercase__ : Optional[int] = pipe(**_snake_case ).images
assert image.shape == (1, 64, 64, 3)
lowercase__ : int = image[0, -3:, -3:, -1]
lowercase__ : Any = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
lowercase__ : Optional[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
lowercase__ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
lowercase__ : Tuple = ConsistencyModelPipeline(unet=_snake_case ,scheduler=_snake_case )
pipe.to(torch_device=_snake_case ,torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Optional[Any] = self.get_inputs(get_fixed_latents=_snake_case ,device=_snake_case )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=_snake_case ,enable_math=_snake_case ,enable_mem_efficient=_snake_case ):
lowercase__ : List[str] = pipe(**_snake_case ).images
assert image.shape == (1, 64, 64, 3)
lowercase__ : int = image[0, -3:, -3:, -1]
lowercase__ : str = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
lowercase__ : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
lowercase__ : Any = ConsistencyModelPipeline(unet=_snake_case ,scheduler=_snake_case )
pipe.to(torch_device=_snake_case ,torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Optional[int] = self.get_inputs(get_fixed_latents=_snake_case ,device=_snake_case )
lowercase__ : List[str] = 1
lowercase__ : Optional[Any] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=_snake_case ,enable_math=_snake_case ,enable_mem_efficient=_snake_case ):
lowercase__ : Any = pipe(**_snake_case ).images
assert image.shape == (1, 64, 64, 3)
lowercase__ : Tuple = image[0, -3:, -3:, -1]
lowercase__ : str = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
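# One-step generation sketch using the same checkpoint the slow tests load
# (names as imported in this file; kept as comments since it downloads weights):
#
# unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
# scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
# pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
# image = pipe(num_inference_steps=1).images[0]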
| 560 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype)
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic)
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype)
    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets , self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1)
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic)
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1)
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block)
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states , temb)
        for attn, resnet in zip(self.attentions , self.resnets[1:]):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic)
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic)
        return hidden_states
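# Skip-connection sketch: the up blocks above concatenate the stored down-block
# activations with the current hidden states along the channel axis (NHWC
# layout, as used by these Flax blocks; shapes here are illustrative).
_hidden = jnp.zeros((1, 8, 8, 64))  # current decoder activations
_skip = jnp.zeros((1, 8, 8, 32))  # stored encoder activations
assert jnp.concatenate((_hidden, _skip), axis=-1).shape == (1, 8, 8, 96)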
| 499 |
'''simple docstring'''
A__ : List[Any] =[
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 499 | 1 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
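# Worked example: 49/98 is digit-cancelling, since dropping the shared 9 gives
# 4/8 and 4/8 == 49/98. The four two-digit cases are 16/64, 19/95, 26/65 and
# 49/98; their product is (1/4) * (1/5) * (2/5) * (1/2) = 1/100, so solution()
# prints 100.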
| 33 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_lowercase = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
def lowercase__ ( self : List[Any] ):
        __snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(3_0 ).tolist()]} )
return dset
def lowercase__ ( self : List[str] ):
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda __lowerCAmelCase , __lowerCAmelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase )
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def lowercase__ ( self : Optional[int] ):
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase__ ( self : Dict ):
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCAmelCase ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase__ ( self : Union[str, Any] ):
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(__lowerCAmelCase , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def lowercase__ ( self : List[str] ):
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0 )
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=__lowerCAmelCase )
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest(TestCase):
def lowercase__ ( self : Dict ):
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 1_0 )
# single query
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(__lowerCAmelCase )
self.assertRaises(__lowerCAmelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa )[::-1]
__snake_case , __snake_case = index.search_batch(__lowerCAmelCase )
self.assertRaises(__lowerCAmelCase , index.search_batch , queries[0] )
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCAmelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCAmelCase )
def lowercase__ ( self : Optional[int] ):
import faiss
__snake_case = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__snake_case = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCAmelCase ):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def lowercase__ ( self : int ):
import faiss
__snake_case = faiss.IndexFlat(5 )
__snake_case = FaissIndex(custom_index=__lowerCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowercase__ ( self : Tuple ):
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCAmelCase ) as tmp_file:
index.save(tmp_file.name )
__snake_case = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(__lowerCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase__ ( a ):
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f'mock://{index_name}'
index.save(a , storage_options=mockfs.storage_options )
__snake_case = FaissIndex.load(a , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(a )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def lowercase__ ( self : int ):
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=__lowerCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(__lowerCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(__lowerCAmelCase , request_timeout=3_0 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(__lowerCAmelCase )
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCAmelCase )
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(__lowerCAmelCase , request_timeout=3_0 )
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCAmelCase )
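# Minimal FaissIndex round trip, mirroring FaissIndexTest above (needs faiss;
# kept as comments since it builds a real index):
#
# index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# index.add_vectors(np.eye(5, dtype=np.float32))
# query = np.zeros(5, dtype=np.float32); query[1] = 1.0
# scores, indices = index.search(query)  # best match is basis vector 1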
| 356 | 0 |
from math import isqrt
def is_prime(number: int) -> bool:
    # Trial division up to the integer square root; assumes number >= 2.
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))
def solution(max_prime: int = 10**6) -> int:
    # prime_candidate walks the differences of consecutive cubes:
    # (k + 1)**3 - k**3 = 3*k**2 + 3*k + 1, i.e. 7, 19, 37, 61, ...;
    # consecutive gaps grow by 6*k, which is the increment below.
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 710 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case : Union[str, Any] =logging.get_logger(__name__)
__snake_case : Dict ={'vocab_file': 'sentencepiece.model'}
__snake_case : Optional[Any] ={
'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
__snake_case : int ={
'google/rembert': 2_5_6,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self ,__lowerCamelCase ,__lowerCamelCase=False ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase="[CLS]" ,__lowerCamelCase="[SEP]" ,__lowerCamelCase="[UNK]" ,__lowerCamelCase="[SEP]" ,__lowerCamelCase="[PAD]" ,__lowerCamelCase="[CLS]" ,__lowerCamelCase="[MASK]" ,**__lowerCamelCase ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
do_lower_case=__lowerCamelCase ,remove_space=__lowerCamelCase ,keep_accents=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,**__lowerCamelCase ,)
lowerCAmelCase__ : int = do_lower_case
lowerCAmelCase__ : Optional[Any] = remove_space
lowerCAmelCase__ : Any = keep_accents
lowerCAmelCase__ : Dict = vocab_file
lowerCAmelCase__ : List[str] = spm.SentencePieceProcessor()
self.sp_model.Load(__lowerCamelCase )
@property
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : int = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.__dict__.copy()
lowerCAmelCase__ : Dict = None
return state
def __setstate__(self ,__lowerCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Dict = d
lowerCAmelCase__ : List[str] = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase=False ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : str = self.sp_model.EncodeAsPieces(__lowerCamelCase )
return pieces
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Dict:
"""simple docstring"""
return self.sp_model.PieceToId(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> List[str]:
"""simple docstring"""
return self.sp_model.IdToPiece(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.sp_model.decode_pieces(__lowerCamelCase )
return out_string
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
lowerCAmelCase__ : str = [self.sep_token_id]
lowerCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ,__lowerCamelCase = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__lowerCamelCase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__lowerCamelCase ) )
return
lowerCAmelCase__ : Union[str, Any] = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file ,__lowerCamelCase )
return (out_vocab_file,)
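

# --- Illustrative usage sketch (ours, not part of the original file) ---------
# The class name below is a placeholder, since the class declaration sits
# earlier in the file; "spiece.model" is an assumed SentencePiece vocab file.
#
#   tok = MySentencePieceTokenizer(vocab_file="spiece.model")
#   tokens = tok._tokenize("unwanted running")
#   ids = tok.build_inputs_with_special_tokens([tok._convert_token_to_id(t) for t in tokens])
#   mask = tok.get_special_tokens_mask(ids, already_has_special_tokens=True)
#   # mask marks the [CLS]/[SEP] positions wrapped around the sequence.
# ------------------------------------------------------------------------------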
| 90 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
# fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)
# verify conversion
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
SCREAMING_SNAKE_CASE_ : Any =torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
SCREAMING_SNAKE_CASE_ : Any =torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
SCREAMING_SNAKE_CASE_ : List[str] =torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
SCREAMING_SNAKE_CASE_ : List[str] =torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
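
# Example invocation (illustrative; the script filename is an assumption, the
# flags match the argparse definitions above):
#
#   python convert_focalnet_checkpoint.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny \
#       --push_to_hub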
| 443 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input_dict = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input_dict)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input_dict = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input_dict)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 443 | 1 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
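

# A minimal sketch of the shard-size arithmetic the test above relies on
# (assumed, per the comment in the test: each int64 row serializes to 8 bytes);
# the helper name is ours, not part of the library under test:
def _expected_num_partitions(num_rows: int, row_bytes: int, max_shard_size: int) -> int:
    return -(-num_rows * row_bytes // max_shard_size)  # ceil division


# _expected_num_partitions(100, 8, 16) == 50, matching the assertion above.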
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 718 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: "Image") -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
            ],
            outputs,
        )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT.")
| 521 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 572 |
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'{solution() = }')
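

# Worked example (ours): R(6) = 111111 = 7 * 15873, so A(7) == 6; and
# solution(10) == 17, since A(17) = 16 is the first A(n) to exceed ten
# (the value quoted in Project Euler problem 129's statement).
def _sanity_check() -> None:
    assert least_divisible_repunit(7) == 6
    assert solution(10) == 17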
| 572 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
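

# A minimal, self-contained illustration of WordpieceTokenizer's greedy
# longest-match-first behavior, using the same toy vocab as the tests above
# (the helper name is ours, added for illustration only):
def _demo_wordpiece_longest_match_first() -> None:
    vocab = {t: i for i, t in enumerate(["[UNK]", "un", "##want", "##ed", "runn", "##ing"])}
    tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
    assert tokenizer.tokenize("unwanted running") == ["un", "##want", "##ed", "runn", "##ing"]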
| 587 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
@slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest( TFVisionTextDualEncoderMixin , unittest.TestCase ):
'''simple docstring'''
    def get_pretrained_model_and_inputs( self ):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = TFDeiTModel(vision_config , name="vision_model" )
        text_model = TFRobertaModel(text_config , name="text_model" )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest( TFVisionTextDualEncoderMixin , unittest.TestCase ):
'''simple docstring'''
    def get_pretrained_model_and_inputs( self ):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = TFCLIPVisionModel(vision_config , name="vision_model" )
        text_model = TFBertModel(text_config , name="text_model" )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference( self ):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=True )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"] , images=image , padding=True , return_tensors="np" )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.228_4727, 0.310_4122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , expected_logits , atol=1E-3 ) )
| 587 | 1 |
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement." )
    return mat_c - mat_b.T @ a_inv @ mat_b
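# Quick sketch of the identity the tests below verify (values illustrative, not
# from the original file): for the block matrix M = [[A, B], [B.T, C]] with A
# invertible, det(M) == det(A) * det(S), where S = schur_complement(A, B, C).
#
# >>> schur_complement(np.eye(3), np.zeros((3, 2)), 2 * np.eye(2))
# array([[2., 0.],
#        [0., 2.]])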
class TestSchurComplement(unittest.TestCase ):
    def test_schur_complement(self ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
    def test_improper_a_b_dimensions(self ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , c , b )
    def test_improper_b_c_dimensions(self ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 475 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , input_dims: int = 1_28 , targets_length: int = 2_56 , max_decoder_noise_time: float = 2_0_0_0.0 , d_model: int = 7_68 , num_layers: int = 12 , num_heads: int = 12 , d_kv: int = 64 , d_ff: int = 20_48 , dropout_rate: float = 0.1 , ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ):
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ):
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
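# Time-conditioning sketch (shapes illustrative, using only the signature seen
# above): a noise level in [0, 1) is rescaled to [0, max_decoder_noise_time)
# and embedded sinusoidally before the FiLM conditioning MLP.
#
#   t = torch.rand(4)  # (batch,)
#   emb = get_timestep_embedding(t * 2000.0, embedding_dim=768, max_period=2000.0)
#   emb.shape  # -> torch.Size([4, 768])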
class DecoderLayer( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1E-6 ):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ):
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_decoder_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_decoder_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate ):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attn_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attn_output )
        return hidden_states
class TaLayerCrossAttention( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attn_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attn_output )
        return layer_output
class TaLayerFFCond( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate ):
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ):
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm( nn.Module ):
    def __init__( self , hidden_size , eps=1E-6 ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self , hidden_states ):
        # T5 uses a layer_norm which only scales and doesn't shift; variance is computed in fp32.
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
    def forward( self , input: torch.Tensor ):
        # tanh approximation of GELU, as used in Google BERT / GPT-2.
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer( nn.Module ):
    def __init__( self , in_features , out_features ):
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self , x , conditioning_emb ):
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
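# FiLM sketch (shapes illustrative): the conditioning embedding is projected to
# a (scale, shift) pair and applied as x * (1 + scale) + shift.
#
#   film = TaFiLMLayer(in_features=16, out_features=4)
#   x = torch.randn(2, 8, 4)      # (batch, seq, features)
#   cond = torch.randn(2, 1, 16)  # broadcasts over the sequence dimension
#   y = film(x, cond)             # same shape as x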
| 475 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig( PretrainedConfig ):
    model_type = "unispeech"
    def __init__( self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_28 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=3_20 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_00 , codevector_dim=2_56 , proj_codevector_dim=2_56 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
@property
    def inputs_to_logits_ratio( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
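    # Quick check of the property above with the default conv_stride
    # (illustrative): the product of all strides is the number of raw input
    # samples that map to one output frame of the feature encoder.
    #
    #   functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)  # -> 320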
| 72 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    def __init__( self , size: int ):
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex: int ) -> Iterator[Edge]:
        return iter(self._graph[vertex] )
    @property
    def size( self ):
        return self._size
    def add_edge( self , from_vertex: int , to_vertex: int , weight: int ):
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""" )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""" )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path( self , start_vertex: int , finish_vertex: int ) -> int:
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-1 BFS: weight-0 edges go to the front, weight-1 edges to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""" )
        return distances[finish_vertex]
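# Usage sketch (edges are illustrative): 0-1 BFS computes the shortest path on
# a graph whose edge weights are restricted to 0 and 1, using a deque instead
# of a priority queue.
#
# >>> g = AdjacencyList(4)
# >>> g.add_edge(0, 1, 0)
# >>> g.add_edge(1, 2, 1)
# >>> g.add_edge(2, 3, 0)
# >>> g.get_shortest_path(0, 3)
# 1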
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 | 1 |
import os
import time
import numpy as np
import onnxruntime as ort
os.environ["ORT_TENSORRT_INT8_ENABLE"] = '1'
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = '0'
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = '1'
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('Warm up phase...')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print('Start inference...')
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
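# Optional follow-up (a sketch, reusing the names defined above): derive
# throughput from the same timing loop rather than just mean latency.
#
#   elapsed_s = time.time() - start_time
#   print('Throughput = {:.1f} sequences / s'.format(batch * max_iters / elapsed_s))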
| 248 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def simple_accuracy(preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , '''sklearn''' )
    return (preds == labels).mean()
def acc_and_f1(preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_f1 , '''sklearn''' )
    acc = simple_accuracy(preds , labels )
    f1 = f1_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , '''sklearn''' )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , '''sklearn''' )
    assert len(preds ) == len(labels ), f"""Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}"""
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_f1(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_f1(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics(task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , '''sklearn''' )
    if len(preds ) != len(labels ):
        raise ValueError(f"""Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}""" )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
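# Usage sketch (toy arrays, illustrative only):
#
#   import numpy as np
#   glue_compute_metrics("mrpc", np.array([1, 0, 1]), np.array([1, 1, 1]))
#   # -> {"acc": 0.666..., "f1": 0.8, "acc_and_f1": 0.733...}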
| 248 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
        return inputs_dict
    class TFMobileBertModelTester( object ):
        def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , embedding_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs( self ):
            input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length] )
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
                token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
                choice_labels = ids_tensor([self.batch_size] , self.num_choices )
            config = MobileBertConfig(
                vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertModel(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            inputs = [input_ids, input_mask]
            result = model(inputs )
            result = model(input_ids )
            self.parent.assertEqual(
                result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
            self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
        def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForMaskedLM(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForNextSentencePrediction(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
        def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForPreTraining(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(
                result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
            self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
        def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config )
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
        def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
        def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
            model = TFMobileBertForQuestionAnswering(config=config )
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs )
            self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
            self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        def prepare_config_and_inputs_for_common( self ):
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = self.prepare_config_and_inputs()
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp( self ):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFMobileBertModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 3_05_22]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
[
[
[-4.591_9547, -9.24_8295, -9.64_5256],
[-6.730_6175, -6.44_0284, -6.605_2837],
[-7.274_3506, -6.784_7915, -6.02_4673],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 223 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
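# How the lazy pattern behaves (illustrative): importing the package itself is
# cheap; an access such as `from transformers.models.owlvit import OwlViTModel`
# only triggers the `modeling_owlvit` import (and its torch dependency check)
# at that point, because `_LazyModule` resolves submodules on attribute access.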
| 223 | 1 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_config_info_error(path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def test_get_dataset_config_names(path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def test_get_dataset_info(path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_split_names(path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_split_names_error(path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 4 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args, **kwargs ):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests( unittest.TestCase ):
@require_torch
    def test_small_model_pt( self ):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        output = image_classifier(image, candidate_labels=["a", "b", "c"] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output ), [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ], )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
        self.assertEqual(
            nested_simplify(output ), [
                [
                    {"score": 0.333, "label": ANY(str )},
                    {"score": 0.333, "label": ANY(str )},
                    {"score": 0.333, "label": ANY(str )},
                ],
            ]
            * 5, )
@require_tf
    def test_small_model_tf( self ):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        output = image_classifier(image, candidate_labels=["a", "b", "c"] )
        self.assertEqual(
            nested_simplify(output ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
        self.assertEqual(
            nested_simplify(output ), [
                [
                    {"score": 0.333, "label": ANY(str )},
                    {"score": 0.333, "label": ANY(str )},
                    {"score": 0.333, "label": ANY(str )},
                ],
            ]
            * 5, )
@slow
@require_torch
    def test_large_model_pt( self ):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"] )
        self.assertEqual(
            nested_simplify(output ), [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ], )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
        self.assertEqual(
            nested_simplify(output ), [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5, )
@slow
@require_tf
    def test_large_model_tf( self ):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"] )
        self.assertEqual(
            nested_simplify(output ), [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ], )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
        self.assertEqual(
            nested_simplify(output ), [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5, )
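# Usage sketch outside the test harness (model name as used in the slow tests
# above):
#
#   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("./tests/fixtures/tests_samples/COCO/000000039769.png",
#              candidate_labels=["cat", "plane", "remote"])
#   # -> a list of {"score", "label"} dicts sorted by descending score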
| 693 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
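# Resolution sketch (illustrative key): the dotted key is walked attribute by
# attribute, e.g. "encoder.layers.0.attention.k_proj" becomes
#
#   for attribute in "encoder.layers.0.attention.k_proj".split("."):
#       hf_pointer = getattr(hf_pointer, attribute)
#
# nn.Module supports getattr("0") on a ModuleList, which is what makes the
# numeric path segments work.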
def recursively_load_weights(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(F'Unused weights: {unused_weights}' )
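# Mapping sketch (illustrative weight name): a fairseq parameter such as
#   "encoder.layers.3.self_attn.k_proj.weight"
# matches MAPPING["self_attn.k_proj"]; the layer index "3" is extracted from
# the part of the name before the key and spliced into
# "unispeech_sat.encoder.layers.*.attention.k_proj", after which
# set_recursively assigns the tensor with weight_type "weight".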
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    '''simple docstring'''
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config )
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wav2vec )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
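# Illustrative invocation of the script above (the script filename and all paths are
# placeholders, not taken from this file):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --not_finetuned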
| 707 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class OpenLlamaConfig ( PretrainedConfig ):
    model_type = """open-llama"""
    def __init__( self , vocab_size=100000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NOTE: the misspelled kwarg key below ("memorry") matches the upstream config
        # and is kept for backward compatibility with existing checkpoints.
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention' , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
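# Illustrative usage sketch: build the config offline and exercise the RoPE-scaling
# validator; the scaling values below are arbitrary examples, not recommended settings.
if __name__ == "__main__":
    cfg = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0} )
    print(cfg.rope_scaling )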
| 400 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config ( config_path , display=False ):
    """simple docstring"""
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan ( device , conf_path=None , ckpt_path=None ):
    """simple docstring"""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan ( x , model ):
    """simple docstring"""
    z , _ , _ = model.encode(x )
    print(f'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str ( string , reload=False ):
    """simple docstring"""
    module , cls = string.rsplit("." , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config ( config ):
    """simple docstring"""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate." )
    return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config ( config , sd , gpu=True , eval_mode=True ):
    """simple docstring"""
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model ( config , ckpt , gpu , eval_mode ):
    """simple docstring"""
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(f'''loaded model from global step {global_step}.''' )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
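# Illustrative usage sketch (assumes the default checkpoint files referenced above
# exist locally and taming-transformers is installed; the random tensor stands in
# for a preprocessed image batch):
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device )
    sample = torch.randn(1 , 3 , 256 , 256 , device=device )
    print(reconstruct_with_vqgan(sample , vqgan ).shape )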
| 693 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a, _a=99, _a=13, _a=7, _a=9, _a=True, _a=True, _a=False, _a=32, _a=5, _a=4, _a=37, _a=8, _a=0.1, _a=0.002, _a=1, _a=0, _a=0, _a=None, _a=None, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = encoder_seq_length
__SCREAMING_SNAKE_CASE = decoder_seq_length
# For common tests
__SCREAMING_SNAKE_CASE = self.decoder_seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_attention_mask
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = d_ff
__SCREAMING_SNAKE_CASE = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE = dropout_rate
__SCREAMING_SNAKE_CASE = initializer_factor
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = decoder_start_token_id
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = decoder_layers
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig.from_pretrained("google/umt5-base" )
def __lowerCAmelCase ( self, _a, _a, _a, _a=None, _a=None, _a=None, _a=None, _a=None, ) -> int:
if attention_mask is None:
__SCREAMING_SNAKE_CASE = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=_a )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=_a )
if cross_attn_head_mask is None:
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_attention_heads, device=_a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
__SCREAMING_SNAKE_CASE = input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = decoder_input_ids.clamp(self.pad_token_id + 1 )
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = config.num_attention_heads
__SCREAMING_SNAKE_CASE = self.prepare_inputs_dict(_a, _a, _a )
return config, input_dict
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
return config, inputs_dict
def __lowerCAmelCase ( self ) -> Optional[int]:
return TaConfig(
vocab_size=1_66, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return TaConfig(
vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a )
model.to(_a )
model.eval()
__SCREAMING_SNAKE_CASE = model(
input_ids=_a, decoder_input_ids=_a, attention_mask=_a, decoder_attention_mask=_a, )
__SCREAMING_SNAKE_CASE = model(input_ids=_a, decoder_input_ids=_a )
__SCREAMING_SNAKE_CASE = result.last_hidden_state
__SCREAMING_SNAKE_CASE = result.past_key_values
__SCREAMING_SNAKE_CASE = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_a ), config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ), 4 )
def __lowerCAmelCase ( self, _a, _a, _a, _a, _a, _a, ) -> Tuple:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
# first forward pass
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
__SCREAMING_SNAKE_CASE = model(_a )
__SCREAMING_SNAKE_CASE = model(_a, use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1), config.vocab_size )
# append to next input_ids and
__SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens], dim=-1 )
__SCREAMING_SNAKE_CASE = model(_a )["last_hidden_state"]
__SCREAMING_SNAKE_CASE = model(_a, past_key_values=_a )["last_hidden_state"]
# select random slice
__SCREAMING_SNAKE_CASE = ids_tensor((1,), output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a, _a, atol=1E-3 ) )
def __lowerCAmelCase ( self, _a, _a, ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = UMTaModel(config=_a ).to(_a ).half().eval()
__SCREAMING_SNAKE_CASE = model(**_a )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =(
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ =(UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ =(
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =False
SCREAMING_SNAKE_CASE__ =True
SCREAMING_SNAKE_CASE__ =True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE__ =[0.8, 0.9]
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def __lowerCAmelCase ( self ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_a, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'''{tmpdirname}/t5_test.onnx''', export_params=_a, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
@unittest.skipIf(torch_device == "cpu", "Cant do half precision" )
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = config_and_inputs[0]
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration(_a ).eval()
model.to(_a )
__SCREAMING_SNAKE_CASE = {
"head_mask": torch.zeros(config.num_layers, config.num_heads, device=_a ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=_a ),
}
for attn_name, (name, mask) in zip(_a, head_masking.items() ):
__SCREAMING_SNAKE_CASE = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__SCREAMING_SNAKE_CASE = torch.ones(
config.num_decoder_layers, config.num_heads, device=_a )
__SCREAMING_SNAKE_CASE = model.generate(
config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=_a, return_dict_in_generate=_a, **_a, )
# We check the state of decoder_attentions and cross_attentions just from the last step
__SCREAMING_SNAKE_CASE = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ), 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def __lowerCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = UMTaForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=_a ).to(_a )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=_a, legacy=_a )
__SCREAMING_SNAKE_CASE = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__SCREAMING_SNAKE_CASE = tokenizer(_a, return_tensors="pt", padding=_a ).input_ids
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a, _a )
__SCREAMING_SNAKE_CASE = model.generate(input_ids.to(_a ) )
__SCREAMING_SNAKE_CASE = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_a )
self.assertEqual(_a, _a )
| 693 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig (BackboneConfigMixin , PretrainedConfig):
    '''simple docstring'''
    model_type = """focalnet"""
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[192, 384, 768, 768] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1E-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
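# Illustrative sanity check (nothing is downloaded; values are the defaults above).
# The out_features behavior is an assumption about how the aligner treats None,
# namely falling back to the last stage.
if __name__ == "__main__":
    cfg = FocalNetConfig()
    print(cfg.stage_names )
    print(cfg.out_features )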
| 12 |
"""simple docstring"""
from math import sqrt
def is_prime ( number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution ( nth: int = 10001 ) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
return number
if __name__ == "__main__":
print(f'{solution() = }')
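    # Spot checks (well-known values: the 6th prime is 13, and Project Euler #7
    # gives 104743 for the default nth=10001):
    assert solution(6 ) == 13
    assert solution() == 104_743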
| 12 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one ( i ):  # picklable for multiprocessing
    """simple docstring"""
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input ( ):
    """simple docstring"""
    with parallel_backend('''spark''' ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend('''unsupported backend''' ):
            map_nested(add_one, lst, num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend('''unsupported backend''' ):
            map_nested(add_one, lst, num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''', [2, -1] )
def test_map_nested_with_spark ( num_proc ):
    """simple docstring"""
    s1 = [1, 2]
    s2 = {'''a''': 1, '''b''': 2}
    s3 = {'''a''': [1, 2], '''b''': [3, 4]}
    s4 = {'''a''': {'''1''': 1}, '''b''': 2}
    s5 = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'''a''': 2, '''b''': 3}
    expected_map_nested_s3 = {'''a''': [2, 3], '''b''': [4, 5]}
    expected_map_nested_s4 = {'''a''': {'''1''': 2}, '''b''': 3}
    expected_map_nested_s5 = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
    with parallel_backend('''spark''' ):
        assert map_nested(add_one, s1, num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc ) == expected_map_nested_s5
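# Illustrative standalone use of the Spark joblib backend outside pytest (assumes
# pyspark and joblibspark are installed and a local Spark session can be created):
if __name__ == "__main__":
    with parallel_backend("spark" ):
        print(map_nested(add_one , {"a": [1, 2], "b": [3, 4]} , num_proc=2 ) )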
| 154 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct , x , y , z = symbols('''ct x y z''')
def beta ( velocity: float ) -> float:
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return velocity / c
def gamma ( velocity: float ) -> float:
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix ( velocity: float ) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform ( velocity: float , event: np.ndarray | None = None ) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print('''Example of four vector: ''')
    print(F"""ct' = {four_vector[0]}""")
    print(F"""x' = {four_vector[1]}""")
    print(F"""y' = {four_vector[2]}""")
    print(F"""z' = {four_vector[3]}""")
    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 661 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1_024,
    "microsoft/speecht5_tts": 1_024,
    "microsoft/speecht5_vc": 1_024,
}
class SpeechT5Tokenizer ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size()
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + suffix_ones
        return ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
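# Illustrative usage sketch (requires network access to fetch the sentencepiece
# model; the checkpoint name comes from the pretrained map above):
#
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#   ids = tokenizer("hello world").input_ids
#   print(tokenizer.convert_ids_to_tokens(ids))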
| 243 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 243 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE ) as metadata_file:
UpperCAmelCase_ : List[Any] = json.load(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
UpperCAmelCase_ : Any = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" )
# Load the entity vocab file
UpperCAmelCase_ : Optional[Any] = load_entity_vocab(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCAmelCase_ : List[Any] = AddedToken("<ent>" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = AddedToken("<ent2>" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = LukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
UpperCAmelCase_ : str = state_dict["embeddings.word_embeddings.weight"]
UpperCAmelCase_ : Dict = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
UpperCAmelCase_ : Any = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
UpperCAmelCase_ : int = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCAmelCase_ : List[str] = F'''encoder.layer.{layer_index}.attention.self.'''
UpperCAmelCase_ : Tuple = state_dict[prefix + matrix_name]
UpperCAmelCase_ : Union[str, Any] = state_dict[prefix + matrix_name]
UpperCAmelCase_ : Optional[int] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCAmelCase_ : List[str] = state_dict["entity_embeddings.entity_embeddings.weight"]
UpperCAmelCase_ : int = entity_emb[entity_vocab["[MASK]"]]
UpperCAmelCase_ : Dict = LukeModel(config=_SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if not (len(_SCREAMING_SNAKE_CASE ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'''Missing keys {", ".join(_SCREAMING_SNAKE_CASE )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
UpperCAmelCase_ : Optional[Any] = LukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task="entity_classification" )
UpperCAmelCase_ : Tuple = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
UpperCAmelCase_ : Optional[Any] = (39, 42)
UpperCAmelCase_ : List[str] = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , add_prefix_space=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
UpperCAmelCase_ : Dict = model(**_SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
UpperCAmelCase_ : str = torch.Size((1, 42, 10_24) )
UpperCAmelCase_ : List[Any] = torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
else: # base
UpperCAmelCase_ : int = torch.Size((1, 42, 7_68) )
UpperCAmelCase_ : Dict = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
UpperCAmelCase_ : int = torch.Size((1, 1, 10_24) )
UpperCAmelCase_ : Optional[Any] = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
else: # base
UpperCAmelCase_ : int = torch.Size((1, 1, 7_68) )
UpperCAmelCase_ : Optional[Any] = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(_SCREAMING_SNAKE_CASE ) )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Any = {}
with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = line.rstrip().split("\t" )
UpperCAmelCase_ : List[str] = index
return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
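# Illustrative invocation of the converter above (the script filename and all paths
# are placeholders for files exported from the original LUKE repository):
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path luke_base/pytorch_model.bin \
#       --metadata_path luke_base/metadata.json \
#       --entity_vocab_path luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base-hf \
#       --model_size base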
| 71 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _snake_case :
__A : Dict =BlenderbotConfig
__A : Union[str, Any] ={}
__A : Any ="gelu"
def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,):
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : str = eos_token_id
UpperCAmelCase_ : List[Any] = pad_token_id
UpperCAmelCase_ : List[Any] = bos_token_id
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case )
return config, inputs_dict
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder()
UpperCAmelCase_ : int = inputs_dict["input_ids"]
UpperCAmelCase_ : Dict = input_ids[:1, :]
UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_ : int = inputs_dict["head_mask"]
UpperCAmelCase_ : Optional[int] = 1
# first forward pass
UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0]
UpperCAmelCase_ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 )
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , ) -> Union[str, Any]:
"""simple docstring"""
if attention_mask is None:
UpperCAmelCase_ : Dict = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase_ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__A : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__A : Dict =(
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A : Any =True
__A : Dict =False
__A : Dict =False
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = TFBlenderbotModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=_snake_case )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
@require_tokenizers
@require_tf
class _snake_case (unittest.TestCase):
__A : Optional[int] =["My friends are cool but they eat too many carbs."]
__A : Optional[Any] ="facebook/blenderbot-400M-distill"
@cached_property
def UpperCamelCase__ ( self ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.tokenizer(self.src_text ,return_tensors="tf" )
UpperCAmelCase_ : Union[str, Any] = self.model.generate(
model_inputs.input_ids ,)
UpperCAmelCase_ : str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_snake_case )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 71 | 1 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence (x ):
    x = re.sub('<n>' , '' , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
"""simple docstring"""
def neville_interpolate (x_points , y_points , x0 ):
    n = len(x_points )
    # q[j][i] holds the value at x0 of the interpolating polynomial of degree i - 1
    # through the points x_points[j - i + 1] .. x_points[j] (column 0 is unused)
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
    doctest.testmod()
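    # Worked example: the points lie on y = x + 5, so interpolating at x0 = 5
    # returns 10 (as a float) in the first element of the result.
    print(neville_interpolate((1, 2, 3, 4, 6) , (6, 7, 8, 9, 11) , 5 )[0] )
| 549 | 0 |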
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class lowerCamelCase__ ( snake_case ):
def __init__( self ,A ,A ,A ,A=None ):
super().__init__(
A ,question_encoder_tokenizer=A ,generator_tokenizer=A ,index=A ,init_retrieval=A ,)
UpperCAmelCase = None
def _UpperCamelCase ( self ,A ):
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
UpperCAmelCase = self._infer_socket_ifname()
# avoid clash with the NCCL port
UpperCAmelCase = str(distributed_port + 1 )
UpperCAmelCase = dist.new_group(ranks=A ,backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _UpperCamelCase ( self ):
return dist.get_rank(group=self.process_group ) == 0
def _UpperCamelCase ( self ,A ,A ,A=torch.floataa ):
UpperCAmelCase = torch.empty(A ,dtype=A )
dist.scatter(A ,src=0 ,scatter_list=A ,group=self.process_group )
return target_tensor
def _UpperCamelCase ( self ):
UpperCAmelCase = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
UpperCAmelCase = next((addr for addr in addrs if addr.startswith("""e""" )) ,A )
return ifname
def _UpperCamelCase ( self ,A ,A ):
# single GPU training
if not dist.is_initialized():
UpperCAmelCase , UpperCAmelCase = self._main_retrieve(A ,A )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A )
# distributed training
UpperCAmelCase = dist.get_world_size(group=self.process_group )
# gather logic
UpperCAmelCase = None
if self._is_main():
UpperCAmelCase = [torch.empty(question_hidden_states.shape ,dtype=torch.floataa ) for _ in range(A )]
dist.gather(torch.tensor(A ) ,dst=0 ,gather_list=A ,group=self.process_group )
# scatter logic
UpperCAmelCase = question_hidden_states.shape[0]
UpperCAmelCase = []
UpperCAmelCase = []
if self._is_main():
assert len(A ) == world_size
UpperCAmelCase , UpperCAmelCase = self._main_retrieve(torch.cat(A ).numpy() ,A )
UpperCAmelCase , UpperCAmelCase = torch.tensor(A ), torch.tensor(A )
UpperCAmelCase = self._chunk_tensor(A ,A )
UpperCAmelCase = self._chunk_tensor(A ,A )
UpperCAmelCase = self._scattered(A ,[n_queries, n_docs] ,target_type=torch.intaa )
UpperCAmelCase = self._scattered(A ,[n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(A )
| 341 |
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion ( value: float , from_type: str , to_type: str ) -> float:
    """simple docstring"""
    from_sanitized = from_type.lower().strip("""s""" )
    to_sanitized = to_type.lower().strip("""s""" )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'from_type\' value: {from_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'to_type\' value: {to_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}'''
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
if __name__ == "__main__":
from doctest import testmod
testmod()
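    # Worked examples consistent with the tables above (unit names and their
    # symbols are interchangeable):
    print(length_conversion(4 , "meter" , "kilometer" ) )  # 0.004
    print(length_conversion(1 , "kilometer" , "meter" ) )  # 1000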
| 341 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
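# A minimal usage sketch (hypothetical image path, not part of the original file):
#
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(image=Image.open("photo.jpg"), question="What is shown in the image?")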
| 555 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
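# Note (descriptive comment, not in the original file): _LazyModule defers the real
# submodule imports until an attribute such as SwiftFormerModel is first accessed,
# which keeps importing this package cheap.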
| 555 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *, aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """simple docstring"""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        """simple docstring"""
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        """simple docstring"""
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
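# A minimal usage sketch (hypothetical dataclass and values, not part of the original file):
#
#     @dataclasses.dataclass
#     class ExampleArguments:
#         learning_rate: float = 3e-5
#         do_train: bool = True  # a True-default bool also gets a generated --no_do_train flag
#
#     parser = HfArgumentParser(ExampleArguments)
#     (example_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4"])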
| 9 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
| 9 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
A__ : Dict = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
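# A minimal usage sketch (hypothetical override values, not part of the original file):
#
#     config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)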
| 704 |
"""simple docstring"""
def prime_sieve_eratosthenes(num):
    """simple docstring"""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
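# A quick worked example (illustrative, not part of the original file):
# prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].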
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 272 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
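# A typical launch sketch (hypothetical script name and flag values; the flags themselves
# are the ones defined in main() above):
#
#     accelerate launch local_sgd.py --local_sgd_steps 4 --gradient_accumulation_steps 2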
| 507 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalid_num_proc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 613 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        """simple docstring"""
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 178 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
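# Note (descriptive comment, not in the original file): timm stores the attention
# projections as a single fused "qkv" tensor; the slices above split that tensor back
# into the separate query/key/value weights and biases that the HF Swinv2 modules expect.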
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 178 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 99 |
def remove_duplicates(key: str) -> str:
    """simple docstring"""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """simple docstring"""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """simple docstring"""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """simple docstring"""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
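# A quick worked sketch (hypothetical keyword, not part of the original file): with the
# keyword "COLLEGE", remove_duplicates gives "COLEG", so create_cipher_map starts the
# substitution alphabet with A->C, B->O, C->L, D->E, E->G before filling in the rest.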
| 637 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : str = logging.get_logger(__name__)
__A : List[str] = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class TimesformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
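# A minimal usage sketch (hypothetical override values, not part of the original file):
#
#     config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")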
| 712 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
__A : List[Any] = 5_0000
__A : str = 5000
__A , __A : List[str] = os.path.split(__file__)
__A : str = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def UpperCamelCase_ ( A__ : datasets.Dataset , A__ : List[Any] ):
'''simple docstring'''
for i in range(A__ ):
lowerCAmelCase_ : str = dataset[i]
@get_duration
def UpperCamelCase_ ( A__ : datasets.Dataset , A__ : Dict , A__ : Union[str, Any] ):
'''simple docstring'''
for i in range(0 , len(A__ ) , A__ ):
lowerCAmelCase_ : Optional[int] = dataset[i : i + batch_size]
@get_duration
def UpperCamelCase_ ( A__ : datasets.Dataset , A__ : Union[str, Any] , A__ : List[str] ):
'''simple docstring'''
with dataset.formatted_as(type=A__ ):
for i in range(A__ ):
lowerCAmelCase_ : List[Any] = dataset[i]
@get_duration
def UpperCamelCase_ ( A__ : datasets.Dataset , A__ : Union[str, Any] , A__ : Optional[Any] , A__ : int ):
'''simple docstring'''
with dataset.formatted_as(type=A__ ):
for i in range(0 , A__ , A__ ):
lowerCAmelCase_ : Tuple = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
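
# A hedged smoke test (added; not in the original script). It runs the same access
# patterns on a tiny in-memory dataset, assuming the imported `get_duration`
# decorator makes each wrapped function return its elapsed time in seconds.
def smoke_test():
    tiny = datasets.Dataset.from_dict({"numbers": [float(i) for i in range(64)]})
    print({
        "read": read(tiny, length=len(tiny)),
        "read_batch": read_batch(tiny, length=len(tiny), batch_size=16),
    })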
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 398 | 0 |
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        # Keep only alphanumerics and pad with the last character until the length
        # is a multiple of the key order.
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self):
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
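
def _round_trip_demo():
    # A small round-trip check (added; not in the original module). The 2x2 key has
    # determinant 7, which is coprime with 36, so decryption inverts encryption on
    # the processed (uppercased, padded) plaintext.
    cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
    ciphertext = cipher.encrypt("testing hill cipher")
    print(ciphertext)
    print(cipher.decrypt(ciphertext))  # the padded "TESTINGHILLCIPHER..." string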
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 533 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Checks whether 'number' is prime by trial division up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
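
if __name__ == "__main__":
    # Quick usage checks (added; not in the original file). next_prime scans upward
    # from factor * value, or downward when desc=True, and recurses once if the
    # starting value was already prime.
    assert is_prime(29) and not is_prime(33)
    assert next_prime(14) == 17  # 15 and 16 are rejected, 17 is prime
    assert next_prime(14, desc=True) == 13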
| 533 | 1 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
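
def _byte_offset_check():
    # Side note (added, hedged): the integration ids above are consistent with
    # Perceiver's byte-level scheme, where a UTF-8 byte b maps to id b + 6 and
    # [CLS]/[SEP] are ids 4/5. This check only uses that arithmetic.
    ids = [4] + [b + 6 for b in "Unicode €.".encode("utf-8")] + [5]
    assert ids == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]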
| 564 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
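
def _deprecation_demo():
    # Hedged sketch (added; not part of the original file): the shim behaves like
    # MobileViTImageProcessor and only emits a FutureWarning when constructed.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        MobileViTFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)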
| 564 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
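
def _processor_demo():
    # Hedged sketch (added; mirrors test_processor outside unittest). It downloads
    # the same tiny tokenizer checkpoint used in setUp above.
    processor = BlipProcessor(
        BlipImageProcessor(), BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
    )
    image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
    outputs = processor(text="lower newer", images=image)
    print(sorted(outputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']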
| 51 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Convert a '#'-delimited flat table string into a pandas DataFrame."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
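
def _table_text_demo():
    # Worked example (added) of the flat tab_fact table format that
    # _convert_table_text_to_pandas handles inside main(); the cell values here
    # are made up.
    table_text = "rank#name\n1#alice\n2#bob"
    rows = [line.split("#") for line in table_text.strip("\n").split("\n")]
    print(pd.DataFrame.from_records(rows[1:], columns=rows[0]))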
| 29 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the CLIP tokenizer and torchvision image transforms so that gradients
    can flow through the image preprocessing step.
    """

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        """
        Instantiate a VQGAN_CLIP model. If you want to use a custom VQGAN model, pass it as vqgan.
        """
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Adds a transform vector to the base latent and decodes the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
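
def _generate_demo():
    # Hedged usage sketch (added; not in the original script). Prompts may carry
    # weights after a colon, as parsed by process_prompts above; the image path is
    # a placeholder.
    editor = VQGAN_CLIP(iterations=20, lr=0.02)
    editor.generate(
        pos_prompts="a smiling face:1.0 | bright lighting:0.5",
        neg_prompts="blurry:0.5",
        image_path="./path/to/input.png",  # placeholder
        show_intermediate=False,
    )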
| 365 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer if its add_prefix_space setting differs
        # from the one requested here.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 365 | 1 |
from manim import *
class Stage5(Scene):
    def construct(self):
        # NOTE: the arrange() direction constants and a few mobject names below were
        # lost in the obfuscated source and have been restored by hand; the layout
        # may differ slightly from the original animation.
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )

        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )

        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))

        step_2 = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2, run_time=3))

        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
| 141 |
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak the S3PRL downstream weights into the transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
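    # Example invocation (illustrative; all paths are placeholders, and the script
    # filename is assumed):
    #   python convert_wav2vec2_original_s3prl_checkpoint.py \
    #       --base_model_name facebook/wav2vec2-base \
    #       --config_path ./config.json \
    #       --checkpoint_path ./s3prl_downstream.ckpt \
    #       --model_dump_path ./converted_model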
| 410 | 0 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
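# Usage sketch (illustrative only; `run` and `scale` are invented names, not part of
# this module). `deprecate` pops the deprecated kwarg, emits a FutureWarning, and
# returns the popped value:
#
#     def run(**kwargs):
#         scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)
#
# Passing several (attribute, version, message) tuples returns a tuple of values.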
| 705 | """simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
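# Usage sketch (illustrative, outside the test harness): ParquetDatasetReader also
# works on plain files, e.g.
#   ds = ParquetDatasetReader("data.parquet", cache_dir="./cache").read()
# where "data.parquet" and "./cache" are placeholder paths.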
| 635 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
        default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 526 |
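    # Example invocation (illustrative; paths are placeholders and the script
    # filename is assumed):
    #   python convert_original_controlnet_to_diffusers.py \
    #       --checkpoint_path ./control_sd15_canny.pth \
    #       --original_config_file ./cldm_v15.yaml \
    #       --dump_path ./controlnet-out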
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.

    A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
    where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        # Pick `mask_count` random coarse cells, then upsample the coarse grid to
        # the model's patch resolution.
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
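# Worked example with the defaults above, as a sanity check: input_size=192 and
# mask_patch_size=32 give rand_size=6, so token_count=36 coarse cells and
# mask_count=ceil(36 * 0.6)=22 masked cells. scale=32/4=8, so the 6x6 grid is
# upsampled to 48x48 = 2304 entries, matching the (192/4)**2 model patches.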
def collate_fn(examples):
    # Stack images and masks into the batch format expected by masked-image-modeling models.
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"
# adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        # Apply the transforms and create a corresponding boolean mask per image.
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main() | 526 | 1 |
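    # Example invocation (illustrative; flags follow the argument classes above,
    # and the output path is a placeholder):
    #   python run_mim.py --model_type vit --dataset_name cifar10 \
    #       --output_dir ./simmim-out --do_train --do_eval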
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = T5LayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
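# Shape note for the decoder above: `decoder_input_tokens` is (batch, targets_length,
# input_dims); it is projected to d_model, refined by `num_layers` FiLM-conditioned
# decoder blocks, and projected back, so `spec_out` is (batch, targets_length, input_dims).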
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # Gated feed-forward: GELU branch gates the linear branch elementwise.
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
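# Note: this is RMS layer normalization (scale only, no mean subtraction, no bias):
# y = weight * x / sqrt(mean(x**2, dim=-1) + eps), computed in fp32 for stability.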
class NewGELUActivation(nn.Module):
    """
    Implementation of the GELU activation function currently in the Google BERT repo (identical to OpenAI GPT).
    See also the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
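# The tanh approximation used above: gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).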
class T5FiLMLayer(nn.Module):
    """
    FiLM Layer
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
return x
| 337 |
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers h_n = n * (2 * n - 1), starting at n = 0."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
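    # Sanity check (since the sequence here starts at n = 0): hexagonal_numbers(5)
    # yields [0, 1, 6, 15, 28]; the conventional 1-indexed sequence 1, 6, 15, 28, 45
    # is obtained by dropping the leading 0.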
| 337 | 1 |
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based alphabet position ('a' -> 1, ..., 'z' -> 26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of `encode`: map 1-based alphabet positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
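    # Round-trip example (only lowercase a-z round-trips, which is why the input
    # is lower()-cased above):
    #   encode("hello") -> [8, 5, 12, 12, 15]
    #   decode([8, 5, 12, 12, 15]) -> "hello"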
| 173 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) : Optional[int] = config_and_inputs
UpperCamelCase : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ) -> Tuple:
'''simple docstring'''
UpperCamelCase : Tuple = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCamelCase : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
UpperCamelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : Tuple = XLMModelTester(self )
UpperCamelCase : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase , emb_dim=37 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1 ) -> Optional[Any]:
'''simple docstring'''
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
self.assertListEqual(
[isinstance(lowerCamelCase , lowerCamelCase ) for iter_attentions in attentions] , [True] * len(lowerCamelCase ) )
self.assertEqual(len(lowerCamelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCamelCase ):
# adds PAD dummy token
UpperCamelCase : Dict = min_length + idx + 1
UpperCamelCase : int = min_length + idx + 1
UpperCamelCase : Union[str, Any] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1 ) -> Optional[int]:
'''simple docstring'''
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
self.assertListEqual(
[isinstance(lowerCamelCase , lowerCamelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCamelCase ) , )
self.assertEqual(len(lowerCamelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCamelCase ):
# adds PAD dummy token
UpperCamelCase : Tuple = min_length + idx + 1
UpperCamelCase : str = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCamelCase ) , )
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : Union[str, Any] = XLMModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        # "the president" repeated ten times
        expected_output_ids = [14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447]
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
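# The per-step shape rule enforced by _check_attentions_for_generate above can be
# seen in isolation: at generation step idx the prompt has grown to
# min_length + idx + 1 tokens, so self-attention is square in that length.
# A minimal standalone sketch with illustrative (assumed) sizes:
batch_size, num_beam_groups, num_attention_heads = 2, 1, 4
min_length, max_length = 5, 8
expected_shapes = []
for idx in range(max_length - min_length):
    tgt_len = src_len = min_length + idx + 1
    expected_shapes.append((batch_size * num_beam_groups, num_attention_heads, tgt_len, src_len))
print(expected_shapes)  # [(2, 4, 6, 6), (2, 4, 7, 7), (2, 4, 8, 8)]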
| 173 | 1 |
'''simple docstring'''
import math
def sieve(n: int) -> list:
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)

    return prime
print(sieve(10**6))
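# A quick cross-check of the segmented sieve against a naive sieve of
# Eratosthenes; `naive_sieve` is a helper written here for illustration only.
def naive_sieve(limit: int) -> list:
    flags = [True] * (limit + 1)
    flags[0] = flags[1] = False
    for p in range(2, int(limit**0.5) + 1):
        if flags[p]:
            for multiple in range(p * p, limit + 1, p):
                flags[multiple] = False
    return [i for i, is_prime in enumerate(flags) if is_prime]

assert sieve(10_000) == naive_sieve(10_000)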
| 43 | '''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize=True,
        size=None,
        size_divisor=32,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        do_center_crop=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_pad=True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
pass
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : int ) ->List[str]:
# Initialize image processor
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
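# The shortest-edge resize rule exercised above, reduced to a standalone
# function: scale the shorter side to `size`, cap the longer side at
# 1333/800 * size, then snap both sides down to a multiple of `size_divisor`.
# Defaults mirror the tester; the sample input is an illustrative assumption.
def expected_resized_shape(h, w, size=288, size_divisor=32):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_resized_shape(480, 640))  # (288, 384)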
| 43 | 1 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            other_hand = line[15:].strip()
            player, other = PokerHand(player_hand), PokerHand(other_hand)
            output = player.compare_with(other)
            if output == "Win":
                answer += 1
    assert answer == 376
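# The expected-result trick in generate_random_hand above maps two boolean
# comparisons straight to a list index, since SORTED_HANDS is ordered from
# weakest to strongest: 0 for a loss, 1 for a tie, 2 for a win.
def expected_outcome(play: int, oppo: int) -> str:
    return ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]

assert expected_outcome(3, 7) == "Loss"
assert expected_outcome(5, 5) == "Tie"
assert expected_outcome(9, 2) == "Win"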
| 24 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
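# The label-masking step in _map_to_encoder_decoder_inputs, in isolation:
# pad positions become -100 so PyTorch's cross-entropy loss ignores them.
# pad_token_id=0 is an illustrative assumption.
pad_token_id = 0
labels = [[5, 17, 23, 0, 0], [9, 4, 0, 0, 0]]
masked = [[-100 if token == pad_token_id else token for token in row] for row in labels]
print(masked)  # [[5, 17, 23, -100, -100], [9, 4, -100, -100, -100]]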
| 554 | 0 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound in a fluid: c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
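# Example: speed of sound in water. Density ~998 kg/m^3 and bulk modulus
# ~2.15e9 Pa are rough literature values used purely for illustration.
print(speed_of_sound_in_a_fluid(998.0, 2.15e9))  # ~1467 m/s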
| 525 | import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
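# A quick demonstration of the key-renaming rules in convert_state_dict,
# applied to a few hypothetical RWKV checkpoint keys:
for key in ["emb.weight", "blocks.3.att.key.weight", "blocks.0.ffn.value.weight"]:
    if key.startswith("emb."):
        key = key.replace("emb.", "embeddings.")
    key = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", key)
    key = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", key)
    print("rwkv." + key)
# rwkv.embeddings.weight
# rwkv.blocks.3.attention.key.weight
# rwkv.blocks.0.feed_forward.value.weight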
| 525 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Union[str, Any] = VideoToVideoSDPipeline
a__ : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
a__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
a__ : Any = PipelineTesterMixin.required_optional_params - {"""latents"""}
a__ : Any = False
# No `output_type`.
a__ : Union[str, Any] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
def _lowercase (self : Tuple ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = VideoToVideoSDPipeline(**__a )
UpperCAmelCase_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCAmelCase_ = self.get_dummy_inputs(__a )
UpperCAmelCase_ = "np"
UpperCAmelCase_ = sd_pipe(**__a ).frames
UpperCAmelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase_ = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowercase (self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a , expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _lowercase (self : List[Any] ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _lowercase (self : Optional[Any] ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def _lowercase (self : str ):
pass
def _lowercase (self : Optional[Any] ):
return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.7509766])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
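# The numeric assertions above follow a standard pattern: compare a small,
# fixed slice of the output against frozen reference values within a
# tolerance. Sketch with made-up numbers:
import numpy as np

output_slice = np.array([0.1051, 0.1172, 0.1129])
reference = np.array([0.105, 0.117, 0.113])
assert np.abs(output_slice - reference).max() < 1e-2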
| 78 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class A_(unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=100 , A=13 , A=30 , A=2 , A=3 , A=True , A=True , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=10 , A=0.0_2 , A=3 , ):
_lowerCamelCase : str = parent
_lowerCamelCase : Optional[Any] = vocab_size
_lowerCamelCase : int = batch_size
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : Optional[Any] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : Tuple = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Any = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : int = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : List[str] = type_sequence_label_size
_lowerCamelCase : int = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2
_lowerCamelCase : Optional[int] = num_patches + 1
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : List[Any] = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def _lowerCAmelCase ( self , A , A , A ):
_lowerCamelCase : Tuple = FlaxBeitModel(config=A )
_lowerCamelCase : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , A , A , A ):
_lowerCamelCase : List[str] = FlaxBeitForMaskedImageModeling(config=A )
_lowerCamelCase : Any = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowerCAmelCase ( self , A , A , A ):
_lowerCamelCase : Optional[Any] = self.type_sequence_label_size
_lowerCamelCase : Tuple = FlaxBeitForImageClassification(config=A )
_lowerCamelCase : Tuple = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : str = FlaxBeitForImageClassification(A )
_lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[Any] = model(A )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class A_(SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
a_ : List[Any] = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def _lowerCAmelCase ( self ):
_lowerCamelCase : Tuple = FlaxBeitModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(A )
_lowerCamelCase : int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , A )
def _lowerCAmelCase ( self ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase : Dict = self._prepare_for_class(A , A )
_lowerCamelCase : str = model_class(A )
@jax.jit
def model_jitted(A , **A ):
return model(pixel_values=A , **A )
with self.subTest('JIT Enabled' ):
_lowerCamelCase : List[Any] = model_jitted(**A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowerCamelCase : Optional[int] = model_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowerCAmelCase ( self ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowerCAmelCase ( self ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
_lowerCamelCase : Dict = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class A_(unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self ):
_lowerCamelCase : List[str] = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = image_processor(images=A , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
_lowerCamelCase : Optional[Any] = np.ones((1, 196) , dtype=A )
# forward pass
_lowerCamelCase : Optional[int] = model(pixel_values=A , bool_masked_pos=A )
_lowerCamelCase : Optional[int] = outputs.logits
# verify the logits
_lowerCamelCase : str = (1, 196, 8192)
self.assertEqual(logits.shape , A )
_lowerCamelCase : Dict = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , A , atol=1E-2 ) )
@slow
def _lowerCAmelCase ( self ):
_lowerCamelCase : Any = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
_lowerCamelCase : Tuple = self.default_image_processor
_lowerCamelCase : Dict = prepare_img()
_lowerCamelCase : Optional[Any] = image_processor(images=A , return_tensors='np' )
# forward pass
_lowerCamelCase : Tuple = model(**A )
_lowerCamelCase : Optional[int] = outputs.logits
# verify the logits
_lowerCamelCase : int = (1, 1000)
self.assertEqual(logits.shape , A )
        _lowerCamelCase : Tuple = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , A , atol=1E-4 ) )
_lowerCamelCase : str = 281
self.assertEqual(logits.argmax(-1 ).item() , A )
@slow
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[Any] = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : Union[str, Any] = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=A , return_tensors='np' )
# forward pass
_lowerCamelCase : Union[str, Any] = model(**A )
_lowerCamelCase : Optional[int] = outputs.logits
# verify the logits
        _lowerCamelCase : List[Any] = (1, 21841)
self.assertEqual(logits.shape , A )
        _lowerCamelCase : Tuple = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , A , atol=1E-4 ) )
_lowerCamelCase : List[Any] = 2396
self.assertEqual(logits.argmax(-1 ).item() , A )
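# A minimal, self-contained version of the JIT-consistency check used in the
# jit-compilation test above: run the same function with jax.jit enabled and
# disabled, then compare outputs. The toy function is an illustrative
# assumption, not part of the test suite.
import jax
import jax.numpy as jnp

@jax.jit
def toy_fn(x):
    return jnp.tanh(x @ x.T)

x = jnp.ones((2, 3))
jitted = toy_fn(x)
with jax.disable_jit():
    eager = toy_fn(x)
assert jitted.shape == eager.shape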
| 437 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
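# A simplified stand-in for the _LazyModule mechanism used above: importing
# the package is cheap, and attribute access triggers the real submodule
# import. This sketch is illustrative only, not the actual implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")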
| 405 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCAmelCase ="2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
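# Why the guards above use packaging.version instead of comparing strings:
# version segments are compared numerically, not lexicographically.
from packaging import version

assert "3.10" < "3.7"                                # plain string compare gets this wrong
assert version.parse("3.10") > version.parse("3.7")  # version-aware compare gets it right
assert version.parse("8.0.0").major == 8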
| 405 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
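# What mask_ratio=0.75 means with the defaults above: a 224x224 image cut into
# 16x16 patches yields 196 patches, of which the encoder sees only 49.
image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2
num_visible = int(num_patches * (1 - mask_ratio))
print(num_patches, num_visible)  # 196 49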
| 432 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
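# Example invocation (the script filename and all paths are illustrative
# placeholders, not values from the original snippet):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin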
| 432 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ) -> float:
'''simple docstring'''
return 1E-3
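# --- Added usage sketch (illustrative; not part of the original file) ---
# Constructing the config with the resnet-50 defaults shows how `out_features`
# is validated against `stage_names` by the backbone mixin:
if __name__ == "__main__":
    cfg = ResNetConfig(out_features=["stage2", "stage4"])
    print(cfg.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(cfg.out_features)  # ['stage2', 'stage4']
    print(ResNetOnnxConfig(cfg).atol_for_validation)  # 0.001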
| 701 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
BPE_TOKEN_MERGES = """</w>"""
BPE_TOKEN_VOCAB = """@@ """


def get_pairs(word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class Speech2Text2Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs ,):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs ,)
        self.do_lower_case = do_lower_case

        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )

            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding='utf-8' ) as merges_handle:
                merges = merges_handle.read().split('\n' )[:-1]

            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.decoder )

    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )

    def bpe( self , token ):
        '''simple docstring'''
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )

        if not pairs:
            return token

        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j

                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '\n' + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , '' )

        word = word.replace(' ' , BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        '''simple docstring'''
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding.'
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.' )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(' ' ) ) )

        return split_tokens

    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        result = self.decoder.get(index , self.unk_token )
        return result

    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        string = ' '.join(tokens )

        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB ) )

        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return

        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )

        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file , 'w' , encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1

        return (vocab_file, merges_file)
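# --- Added usage sketch (illustrative; not part of the original file) ---
# Decoding-only usage with a tiny, hypothetical vocabulary: without a merges
# file `_tokenize` raises, but id/token mapping and detokenization still work.
def _demo_decode_only(tmp_dir: str) -> str:
    vocab_path = os.path.join(tmp_dir, "vocab.json")
    with open(vocab_path, "w", encoding="utf-8") as f:
        json.dump({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel@@": 4, "lo": 5}, f)
    tok = Speech2Text2Tokenizer(vocab_path)
    return tok.convert_tokens_to_string(["hel@@", "lo"])  # -> "hello"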
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("""KT""")
VT = TypeVar("""VT""")


class Node(Generic[KT, VT] ):
    """simple docstring"""

    def __init__( self , key: KT | str = "root" , value: VT | None = None ):
        '''simple docstring'''
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__( self ):
        '''simple docstring'''
        return F"Node({self.key}: {self.value})"

    @property
    def level( self ) -> int:
        '''simple docstring'''
        return len(self.forward )
class SkipList(Generic[KT, VT] ):
    """simple docstring"""

    def __init__( self , p: float = 0.5 , max_level: int = 1_6 ):
        '''simple docstring'''
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__( self ):
        '''simple docstring'''
        items = list(self )

        if len(items ) == 0:
            return F"SkipList(level={self.level})"

        label_size = max((len(str(item ) ) for item in items) , default=4 )
        label_size = max(label_size , 4 ) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(F"[{node.key}]".ljust(label_size , '-' ) + '* ' * len(forwards ) )
        lines.append(' ' * label_size + '| ' * len(forwards ) )

        while len(node.forward ) != 0:
            node = node.forward[0]
            lines.append(
                F"[{node.key}]".ljust(label_size , '-' )
                + ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
            lines.append(' ' * label_size + '| ' * len(forwards ) )
            forwards = node.forward

        lines.append('None'.ljust(label_size ) + '* ' * len(forwards ) )
        return F"SkipList(level={self.level})\n" + "\n".join(lines )

    def __iter__( self ):
        '''simple docstring'''
        node = self.head

        while len(node.forward ) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level( self ) -> int:
        '''simple docstring'''
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1

        return level
    def _locate_node( self , key ):
        '''simple docstring'''
        update_vector = []

        node = self.head

        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node )

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward ) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete( self , key: KT ):
        '''simple docstring'''
        node, update_vector = self._locate_node(key )

        if node is not None:
            for i, update_node in enumerate(update_vector ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert( self , key: KT , value: VT ):
        '''simple docstring'''
        node, update_vector = self._locate_node(key )
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level ):
                    update_vector.append(self.head )
                self.level = level

            new_node = Node(key , value )

            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )

                if update_node.level < i + 1:
                    update_node.forward.append(new_node )
                else:
                    update_node.forward[i] = new_node

    def find( self , key: VT ):
        '''simple docstring'''
        node, _ = self._locate_node(key )

        if node is not None:
            return node.value

        return None
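# --- Added usage sketch (illustrative; not part of the original file) ---
# Average-case O(log n) search: each level roughly halves the nodes visited.
def _demo_skip_list() -> None:
    demo: SkipList[int, str] = SkipList()
    for key, value in [(3, "three"), (1, "one"), (2, "two")]:
        demo.insert(key, value)
    assert demo.find(2) == "two"
    demo.delete(2)
    assert demo.find(2) is None
    assert list(demo) == [1, 3]  # iteration yields keys in sorted order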
def test_insert() -> None:
    """simple docstring"""
    skip_list = SkipList()
    skip_list.insert('Key1' , 3 )
    skip_list.insert('Key2' , 12 )
    skip_list.insert('Key3' , 41 )
    skip_list.insert('Key4' , -19 )

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values ) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value() -> None:
    """simple docstring"""
    skip_list = SkipList()
    skip_list.insert('Key1' , 10 )
    skip_list.insert('Key1' , 12 )

    skip_list.insert('Key5' , 7 )
    skip_list.insert('Key7' , 10 )
    skip_list.insert('Key10' , 5 )

    skip_list.insert('Key7' , 7 )
    skip_list.insert('Key5' , 5 )
    skip_list.insert('Key10' , 10 )

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values ) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none() -> None:
    """simple docstring"""
    skip_list = SkipList()
    assert skip_list.find('Some key' ) is None


def test_search() -> None:
    """simple docstring"""
    skip_list = SkipList()

    skip_list.insert('Key2' , 20 )
    assert skip_list.find('Key2' ) == 20

    skip_list.insert('Some Key' , 10 )
    skip_list.insert('Key2' , 8 )
    skip_list.insert('V' , 13 )

    assert skip_list.find('Y' ) is None
    assert skip_list.find('Key2' ) == 8
    assert skip_list.find('Some Key' ) == 10
    assert skip_list.find('V' ) == 13
def test_deleting_item_from_empty_list_do_nothing() -> None:
    """simple docstring"""
    skip_list = SkipList()
    skip_list.delete('Some key' )

    assert len(skip_list.head.forward ) == 0


def test_deleted_items_are_not_founded_by_find_method() -> None:
    """simple docstring"""
    skip_list = SkipList()

    skip_list.insert('Key1' , 12 )
    skip_list.insert('V' , 13 )
    skip_list.insert('X' , 14 )
    skip_list.insert('Key2' , 15 )

    skip_list.delete('V' )
    skip_list.delete('Key2' )

    assert skip_list.find('V' ) is None
    assert skip_list.find('Key2' ) is None


def test_delete_removes_only_given_key() -> None:
    """simple docstring"""
    skip_list = SkipList()

    skip_list.insert('Key1' , 12 )
    skip_list.insert('V' , 13 )
    skip_list.insert('X' , 14 )
    skip_list.insert('Key2' , 15 )

    skip_list.delete('V' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) == 14
    assert skip_list.find('Key1' ) == 12
    assert skip_list.find('Key2' ) == 15

    skip_list.delete('X' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) is None
    assert skip_list.find('Key1' ) == 12
    assert skip_list.find('Key2' ) == 15

    skip_list.delete('Key1' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) is None
    assert skip_list.find('Key1' ) is None
    assert skip_list.find('Key2' ) == 15

    skip_list.delete('Key2' )
    assert skip_list.find('V' ) is None
    assert skip_list.find('X' ) is None
    assert skip_list.find('Key1' ) is None
    assert skip_list.find('Key2' ) is None


def test_delete_doesnt_leave_dead_nodes() -> None:
    """simple docstring"""
    skip_list = SkipList()

    skip_list.insert('Key1' , 12 )
    skip_list.insert('V' , 13 )
    skip_list.insert('X' , 142 )
    skip_list.insert('Key2' , 15 )

    skip_list.delete('X' )

    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )

    assert len(set(traverse_keys(skip_list.head ) ) ) == 4


def test_iter_always_yields_sorted_values() -> None:
    """simple docstring"""
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )

    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def pytests() -> None:
    """simple docstring"""
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main() -> None:
    """simple docstring"""
    skip_list = SkipList()
    skip_list.insert(2 , '2' )
    skip_list.insert(4 , '4' )
    skip_list.insert(6 , '4' )
    skip_list.insert(4 , '5' )
    skip_list.insert(8 , '4' )
    skip_list.insert(9 , '4' )

    skip_list.delete(4 )

    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 93 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = '''Usage of script: script_name <size_of_canvas:int>'''

choice = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def create_canvas(canvas_size: int ) -> list[list[bool]]:
    canvas = [[False for i in range(canvas_size )] for j in range(canvas_size )]
    return canvas


def seed(canvas: list[list[bool]] ) -> None:
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )


def run(canvas: list[list[bool]] ) -> list[list[bool]]:
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool , neighbours: list[list[bool]] ) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
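# --- Added usage sketch (illustrative; not part of the original script) ---
def _demo_headless(size: int = 5, generations: int = 3) -> int:
    """Seed a small canvas, advance it a few generations without opening a
    matplotlib window, and return the number of live cells."""
    demo_canvas = create_canvas(size)
    seed(demo_canvas)
    for _ in range(generations):
        demo_canvas = run(demo_canvas)
    return sum(cell for row in demo_canvas for cell in row)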
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['''w''', '''k'''])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 149 | 0 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel ):
    '''simple docstring'''

    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']

    def __init__( self , config ) -> None:
        super().__init__(config )

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config )

        self.p_head = nn.Linear(config.vision_config.projection_dim , 1 )
        self.w_head = nn.Linear(config.vision_config.projection_dim , 1 )

    @torch.no_grad()
    def forward( self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        image_embeds = self.vision_model(clip_input )[0]

        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected ):
            logger.warning(
                'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )

        for idx, nsfw_detected_ in enumerate(nsfw_detected ):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape )

        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected ):
            logger.warning(
                'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )

        for idx, watermark_detected_ in enumerate(watermark_detected ):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )

        return images, nsfw_detected, watermark_detected
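# --- Added usage sketch (illustrative; not part of the original file) ---
# The checker scores CLIP image embeddings with two linear heads; a randomly
# initialised config keeps the sketch self-contained (no pretrained weights):
def _demo_safety_checker():
    config = CLIPConfig()
    checker = IFSafetyChecker(config)
    size = config.vision_config.image_size
    clip_input = torch.randn(2, 3, size, size)
    images = np.random.rand(2, 64, 64, 3)
    return checker(clip_input, images)  # (images, nsfw_flags, watermark_flags)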
| 174 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp( self ):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        return "lower newer", "lower newer"

    def test_full_tokenizer( self ):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )

        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_padding( self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )

                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]

                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )

                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )

                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='''max_length''' , )

                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )

                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )

                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='''max_length''' , )

    def test_padding_different_model_input_name( self ):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest ):
'''simple docstring'''
pass
| 174 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class DPRConfig(PretrainedConfig ):
    model_type = 'dpr'

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim: int = 0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
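# --- Added usage sketch (illustrative; not part of the original file) ---
# One DPRConfig is shared by the context encoder, question encoder and reader;
# a non-zero `projection_dim` adds a projection on top of the pooled vector:
if __name__ == "__main__":
    cfg = DPRConfig(projection_dim=128)
    print(cfg.hidden_size, cfg.projection_dim)  # 768 128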
| 48 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self , vocab_size=256008 , max_position_embeddings=2048 , d_model=1024 , ffn_dim=4096 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.02 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
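# --- Added usage sketch (illustrative; not part of the original file) ---
# `attribute_map` lets architecture-agnostic code read the generic names:
if __name__ == "__main__":
    cfg = XGLMConfig(num_layers=2, d_model=128, attention_heads=4)
    print(cfg.num_hidden_layers, cfg.hidden_size)  # 2 128 (mapped attributes)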
| 426 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = "timm_backbone"

    def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs ,):
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
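# --- Added usage sketch (illustrative; the backbone name is a hypothetical
# timm identifier, which this config does not itself validate) ---
if __name__ == "__main__":
    cfg = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
    print(cfg.backbone, cfg.out_indices)  # resnet18 (1, 2, 3, 4)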
| 632 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}


class CTRLConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self , vocab_size=246_534 , n_positions=256 , n_embd=1_280 , dff=8_192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.0_2 , use_cache=True , **kwargs ,):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs )
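# --- Added usage sketch (illustrative; not part of the original file) ---
if __name__ == "__main__":
    cfg = CTRLConfig(n_layer=2, n_embd=256, n_head=8)
    # attribute_map exposes generic aliases for the CTRL-specific names:
    print(cfg.num_hidden_layers, cfg.hidden_size)  # 2 256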
| 632 | 1 |
'''simple docstring'''
import base64


def base85_encode(string: str ) -> bytes:
    return base64.a85encode(string.encode('utf-8' ) )


def base85_decode(a85encoded: bytes ) -> str:
    return base64.a85decode(a85encoded ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
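# --- Added usage sketch (illustrative; not part of the original module) ---
if __name__ == "__main__":
    payload = base85_encode("some text")
    assert base85_decode(payload) == "some text"  # round trip is the identity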
| 596 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model( fsdp_plugin , accelerator , model , output_dir , model_index=0 ) -> None:
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            output_model_file = os.path.join(output_dir , weights_name)
            if accelerator.process_index == 0:
                logger.info(f'Saving model to {output_model_file}')
                torch.save(state_dict , output_model_file)
                logger.info(f'Model saved to {output_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            output_model_file = os.path.join(output_dir , weights_name)
            logger.info(f'Saving model to {output_model_file}')
            torch.save(state_dict , output_model_file)
            logger.info(f'Model saved to {output_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir , f'{MODEL_NAME}_{model_index}')
            os.makedirs(ckpt_dir , exist_ok=True)
            logger.info(f'Saving model to {ckpt_dir}')
            state_dict = {'model': state_dict}

            dist_cp.save_state_dict(
                state_dict=state_dict , storage_writer=dist_cp.FileSystemWriter(ckpt_dir) , planner=DefaultSavePlanner() , )
            logger.info(f'Model saved to {ckpt_dir}')
def load_fsdp_model( fsdp_plugin , accelerator , model , input_dir , model_index=0 ) -> None:
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
                        'initializing FSDP object')
                return
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            input_model_file = os.path.join(input_dir , weights_name)
            logger.info(f'Loading model from {input_model_file}')
            state_dict = torch.load(input_model_file)
            logger.info(f'Model loaded from {input_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            input_model_file = os.path.join(input_dir , weights_name)
            logger.info(f'Loading model from {input_model_file}')
            state_dict = torch.load(input_model_file)
            logger.info(f'Model loaded from {input_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir , f'{MODEL_NAME}_{model_index}')
                if f'{MODEL_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading model from {ckpt_dir}')
            state_dict = {'model': model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict , storage_reader=dist_cp.FileSystemReader(ckpt_dir) , planner=DefaultLoadPlanner() , )
            state_dict = state_dict['model']
            logger.info(f'Model loaded from {ckpt_dir}')
        model.load_state_dict(state_dict)
def save_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , output_dir , optimizer_index=0 ) -> None:
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config):
        optim_state = FSDP.optim_state_dict(model , optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
                )
                output_optimizer_file = os.path.join(output_dir , optim_state_name)
                logger.info(f'Saving Optimizer state to {output_optimizer_file}')
                torch.save(optim_state , output_optimizer_file)
                logger.info(f'Optimizer state saved in {output_optimizer_file}')
        else:
            ckpt_dir = os.path.join(output_dir , f'{OPTIMIZER_NAME}_{optimizer_index}')
            os.makedirs(ckpt_dir , exist_ok=True)
            logger.info(f'Saving Optimizer state to {ckpt_dir}')
            dist_cp.save_state_dict(
                state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(ckpt_dir) , planner=DefaultSavePlanner() , )
            logger.info(f'Optimizer state saved in {ckpt_dir}')
def load_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , input_dir , optimizer_index=0 ) -> None:
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
            )
            input_optimizer_file = os.path.join(input_dir , optimizer_name)
            logger.info(f'Loading Optimizer state from {input_optimizer_file}')
            optim_state = torch.load(input_optimizer_file)
            logger.info(f'Optimizer state loaded from {input_optimizer_file}')
        else:
            ckpt_dir = (
                os.path.join(input_dir , f'{OPTIMIZER_NAME}_{optimizer_index}')
                if f'{OPTIMIZER_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading Optimizer from {ckpt_dir}')
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(ckpt_dir) , )
            optim_state = optim_state['optimizer']
            logger.info(f'Optimizer loaded from {ckpt_dir}')
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state , model , optimizer)
        optimizer.load_state_dict(flattened_osd)
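# --- Added illustration (not part of the original module) ---
# All four helpers share one dispatch: FULL_STATE_DICT -> one file written by
# the main process, LOCAL_STATE_DICT -> one file per rank, SHARDED_STATE_DICT
# -> a dist_cp checkpoint directory. A toy, single-process sketch of the
# pattern (assumption: plain torch.save stands in for the FSDP machinery):
def _demo_dispatch(out_dir, rank, full_state_dict=True):
    state = {"w": torch.zeros(2)}
    if full_state_dict:
        if rank == 0:  # only the main process persists the gathered state
            torch.save(state, os.path.join(out_dir, f"{MODEL_NAME}.bin"))
    else:  # every rank persists its own shard
        torch.save(state, os.path.join(out_dir, f"{MODEL_NAME}_rank{rank}.bin"))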
| 596 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url ):
    config = DPTConfig(embedding_type='hybrid' )

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict ):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )


def rename_key(name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder' )
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings' )
    if "patch_embed" in name:
        name = name.replace('patch_embed' , '' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layer' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "norm1" in name and "backbone" not in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name and "backbone" not in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head' )
    if "scratch" in name:
        name = name.replace('scratch' , 'neck' )
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0' )
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1' )
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2' )
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3' )
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F'refinenet{layer_idx}' , F'fusion_stage.layers.{abs(layer_idx-4 )}' )
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection' )
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1' )
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2' )
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1' )
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt' )
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm' )
    if "head" in name:
        name = name.replace('head' , 'head.head' )
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm' )
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head' )
    if "backbone" in name:
        name = name.replace('backbone' , 'backbone.bit.encoder' )
    if ".." in name:
        name = name.replace('..' , '.' )
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "convolution" in name and "backbone" in name:
        name = name.replace('convolution' , 'conv' )
    if "layer" in name and "backbone" in name:
        name = name.replace('layer' , 'layers' )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
    if "embedder.conv" in name:
        name = name.replace('embedder.conv' , 'embedder.convolution' )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
    return name
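# --- Added sanity check (illustrative; not part of the original script) ---
# The example key exercises the encoder branches above (pretrained.model ->
# dpt.encoder, attn.proj -> attention.output.dense, blocks -> layer):
def _demo_rename_key() -> None:
    assert rename_key("pretrained.model.blocks.0.attn.proj.weight") == (
        "dpt.encoder.layer.0.attention.output.dense.weight"
    )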
def read_in_q_k_v(state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'dpt.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if 'ade' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()

    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )

    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt' )

    # forward pass
    outputs = model(**encoding ).logits if 'ade' in checkpoint_url else model(**encoding ).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255 ).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'Saving model to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas' )
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
| 700 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=2 , num_choices=4 , summary_type="last" , use_proj=True , scope=None , bos_token_id=0 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config( self ):
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
    def create_and_check_xlm_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ,):
        model = XLMModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
        result = model(input_ids , langs=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_xlm_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ,):
        model = XLMWithLMHeadModel(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_xlm_simple_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ,):
        model = XLMForQuestionAnsweringSimple(config )
        model.to(torch_device )
        model.eval()

        outputs = model(input_ids )
        outputs = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        result = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_xlm_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ,):
        model = XLMForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids )

        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask ,)

        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels ,)

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_xlm_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ,):
        model = XLMForSequenceClassification(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids )
        result = model(input_ids , labels=sequence_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_xlm_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ,):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_xlm_for_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask ,):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels ,)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
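# --- Added illustration (not part of the original tests) ---
# The tester/test split above is the standard transformers pattern: the plain
# class builds (config, inputs) pairs and each `create_and_check_*` method
# asserts one head's output shapes. A minimal end-to-end flavour of that
# contract, with a tiny randomly initialised model (assumes torch is
# installed; the tests themselves are torch-gated anyway):
if __name__ == "__main__" and is_torch_available():
    tiny_config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4)
    tiny_model = XLMModel(tiny_config).eval()
    tiny_input_ids = torch.randint(0, 99, (2, 7))
    assert tiny_model(tiny_input_ids).last_hidden_state.shape == (2, 7, 32)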
@require_torch
class XLMModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
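
# The integration test above pins the greedy continuation of the prompt [14, 447]
# ("the president") to a fixed, repetitive token sequence. A hedged sketch of
# reproducing such a check interactively, kept commented out so a test module is
# not executed on import (assumes network access to download the xlm-mlm-en-2048
# checkpoint; XLMTokenizer is used here only to render readable text):
#
#     import torch
#     from transformers import XLMTokenizer, XLMWithLMHeadModel
#
#     tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
#     model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
#     input_ids = torch.tensor([[14, 447]])  # "the president"
#     output_ids = model.generate(input_ids, do_sample=False)  # greedy, as in the test
#     print(tokenizer.decode(output_ids[0]))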
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as ViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
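
# The `interpolate_pos_encoding=True` path exercised above lets ViT accept images
# whose resolution differs from the pretraining size: the learned patch position
# embeddings are resized over the 2D patch grid. A minimal sketch of that idea
# (simplified and illustrative only -- the real implementation in the transformers
# ViT modeling code also handles the [CLS] token embedding separately):
import tensorflow as tf


def interpolate_patch_pos_embeddings(pos_emb: tf.Tensor, old_grid: int, new_grid: int) -> tf.Tensor:
    """Resize an (old_grid * old_grid, dim) position-embedding table to a new patch grid."""
    dim = pos_emb.shape[-1]
    grid = tf.reshape(pos_emb, (1, old_grid, old_grid, dim))  # lay embeddings out as an image
    grid = tf.image.resize(grid, (new_grid, new_grid), method="bilinear")
    return tf.reshape(grid, (new_grid * new_grid, dim))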

import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
    main()

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}
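# `_import_structure` maps each submodule of this package to the public names it
# provides; `_LazyModule` (bottom of this file) consumes it to defer the heavy
# imports until one of those names is first accessed.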
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
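
# A minimal sketch of the lazy-module mechanism used above (simplified and purely
# illustrative: the real transformers.utils._LazyModule also implements __dir__,
# supports pickling, and registers submodules eagerly):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_submodule = {
            public_name: submodule
            for submodule, public_names in import_structure.items()
            for public_name in public_names
        }

    def __getattr__(self, attr):
        # Import the owning submodule only when one of its names is first requested.
        if attr in self._name_to_submodule:
            submodule = importlib.import_module(f".{self._name_to_submodule[attr]}", self.__name__)
            return getattr(submodule, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")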
from decimal import Decimal, getcontext
from math import ceil, factorial
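
# The function below implements the Chudnovsky series
#
#     1/pi = 12 * sum_{k>=0} (-1)^k (6k)! (13591409 + 545140134 k)
#                            / ((3k)! (k!)^3 * 640320^(3k + 3/2))
#
# Two identities explain the constants used: 426880 * sqrt(10005) = 640320^(3/2) / 12,
# and 640320^3 = 262537412640768000. Each term contributes roughly 14 additional
# correct digits, hence the ceil(precision / 14) iteration count.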
def pi(precision: int) -> str:
    """Return pi to `precision` significant digits, computed with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")