| column | dtype |
|---|---|
| code | string, lengths 82 to 54.1k |
| code_codestyle | int64, 0 to 699 |
| style_context | string, lengths 111 to 35.6k |
| style_context_codestyle | int64, 0 to 699 |
| label | int64, 0 to 1 |
"""Project Euler problem 50: which prime below one million can be written as
the sum of the most consecutive primes?"""
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below ``limit`` using a sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below ``ceiling`` that is the sum of the longest run
    of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
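# Added check (not part of the original file): the Project Euler 50 statement
# notes that below one hundred, 41 = 2 + 3 + 5 + 7 + 11 + 13 is the prime that
# is the sum of the most consecutive primes, so:
#     >>> solution(100)
#     41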
"""Project Euler problem 7: find the 10001st prime number."""
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the ``nth`` prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")

    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
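# Added examples: by definition of the sequence of primes,
#     >>> solution(1)
#     2
#     >>> solution(6)
#     13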
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
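# Added illustration: a minimal standalone sketch of the deferred-import idea
# behind _LazyModule (a module-level __getattr__, PEP 562). This is not
# transformers' actual implementation; the names below are illustrative only.
import importlib

_lazy_structure = {"configuration_swiftformer": ["SwiftFormerConfig"]}


def __getattr__(name):
    # Import the submodule only when one of its exported names is first used
    for submodule, names in _lazy_structure.items():
        if name in names:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")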
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty collections of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class lowercase__ :
"""simple docstring"""
def __init__( self , _A , _A , _A , _A=None , _A=None ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = start
UpperCamelCase : List[Any] = end
UpperCamelCase : Optional[Any] = val
UpperCamelCase : Optional[Any] = (start + end) // 2
UpperCamelCase : List[str] = left
UpperCamelCase : List[Any] = right
def __repr__( self ):
'''simple docstring'''
return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class lowercase__ :
"""simple docstring"""
def __init__( self , _A , _A ):
'''simple docstring'''
UpperCamelCase : int = collection
UpperCamelCase : Tuple = function
if self.collection:
UpperCamelCase : Any = self._build_tree(0 , len(_A ) - 1 )
def _a ( self , _A , _A ):
'''simple docstring'''
self._update_tree(self.root , _A , _A )
def _a ( self , _A , _A ):
'''simple docstring'''
return self._query_range(self.root , _A , _A )
def _a ( self , _A , _A ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(_A , _A , self.collection[start] )
UpperCamelCase : Optional[int] = (start + end) // 2
UpperCamelCase : Union[str, Any] = self._build_tree(_A , _A )
UpperCamelCase : List[str] = self._build_tree(mid + 1 , _A )
return SegmentTreeNode(_A , _A , self.fn(left.val , right.val ) , _A , _A )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
if node.start == i and node.end == i:
UpperCamelCase : List[str] = val
return
if i <= node.mid:
self._update_tree(node.left , _A , _A )
else:
self._update_tree(node.right , _A , _A )
UpperCamelCase : Dict = self.fn(node.left.val , node.right.val )
def _a ( self , _A , _A , _A ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , _A , _A )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , _A , node.mid ) , self._query_range(node.right , node.mid + 1 , _A ) , )
else:
# range in right child tree
return self._query_range(node.right , _A , _A )
def _a ( self ):
'''simple docstring'''
if self.root is not None:
UpperCamelCase : Optional[Any] = Queue()
queue.put(self.root )
while not queue.empty():
UpperCamelCase : Dict = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 5_0)
__magic_name__ : Tuple = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
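# Added worked example: with function=operator.add on [2, 1, 5, 3, 4],
# query_range(1, 3) aggregates indices 1..3 inclusive, so it returns
# 1 + 5 + 3 = 9 before update(1, 5) and 5 + 5 + 3 = 13 after it, matching the
# inline expectations above. Both update and query visit O(log n) nodes.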
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCamelCase , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(__lowerCamelCase , '''depth_multiplier''' ) )
class UpperCAmelCase :
def __init__( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple=1_3 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : List[str]=3_2 , __lowerCamelCase : Dict=0.2_5 , __lowerCamelCase : Optional[Any]=8 , __lowerCamelCase : Dict=True , __lowerCamelCase : List[Any]=1_0_2_4 , __lowerCamelCase : Union[str, Any]=3_2 , __lowerCamelCase : int="relu6" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Tuple=0.0_2 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Union[str, Any]=1_0 , __lowerCamelCase : Optional[Any]=None , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = depth_multiplier
_snake_case = min_depth
_snake_case = tf_padding
_snake_case = int(last_hidden_size * depth_multiplier )
_snake_case = output_stride
_snake_case = hidden_act
_snake_case = classifier_dropout_prob
_snake_case = use_labels
_snake_case = is_training
_snake_case = num_labels
_snake_case = initializer_range
_snake_case = scope
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ):
"""simple docstring"""
_snake_case = MobileNetVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
_snake_case = self.num_labels
_snake_case = MobileNetVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : List[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
A__ : Optional[int] = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
A__ : List[str] = False
A__ : Tuple = False
A__ : List[Any] = False
A__ : List[str] = False
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_snake_case = MobileNetVaModelTester(self )
_snake_case = MobileNetVaConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(__lowerCamelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
def check_hidden_states_output(__lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] ):
_snake_case = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
_snake_case = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_snake_case = outputs.hidden_states
_snake_case = 2_6
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = MobileNetVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def snake_case ( ) -> int:
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_snake_case = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(__lowerCamelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
_snake_case = model(**__lowerCamelCase )
# verify the logits
_snake_case = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
_snake_case = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> Any:
A__ = path_or_paths
A__ = split if split or isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else "train"
A__ = features
A__ = cache_dir
A__ = keep_in_memory
A__ = streaming
A__ = num_proc
A__ = kwargs
@abstractmethod
def snake_case__ ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> str:
A__ = features
A__ = cache_dir
A__ = keep_in_memory
A__ = streaming
A__ = num_proc
A__ = kwargs
@abstractmethod
def snake_case__ ( self ) -> Union[Dataset, IterableDataset]:
pass
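# Hypothetical illustration (not part of the datasets library): a minimal
# concrete subclass showing the contract a reader is expected to fulfil. The
# class name and the use of Dataset.from_list are illustrative assumptions.
class InMemoryListInputStream(AbstractDatasetInputStream):
    def __init__(self, rows: list, features=None, **kwargs):
        super().__init__(features=features, **kwargs)
        self.rows = rows

    def read(self):
        # Materialize the rows as a map-style Dataset
        return Dataset.from_list(self.rows)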
"""ViT model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
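# Added note: with the defaults above, a 224x224 image cut into 16x16 patches
# gives (224 // 16) ** 2 = 196 patch tokens (197 once the [CLS] token is
# prepended), e.g.:
#     >>> config = ViTConfig()
#     >>> (config.image_size // config.patch_size) ** 2
#     196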
"""A radix tree (compressed trie) storing words by shared prefixes."""


class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return the common substring, the remaining prefix and the remaining word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
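# Added worked example: match() returns (common part, leftover prefix,
# leftover word). For a node with prefix "banana" matched against "bandanas"
# the shared part is "ban", so:
#     >>> RadixNode(prefix="banana").match("bandanas")
#     ('ban', 'ana', 'danas')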
"""Project Euler problem 114: count the ways a row of the given length can be
filled with red blocks of length at least three, any two of which are
separated by at least one black square."""


def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
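# Added check: the Project Euler 114 statement counts exactly seventeen ways
# for a row measuring seven units, so:
#     >>> solution(7)
#     17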
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between two nodes using breadth-first search."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between two nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
"""A NOR gate returns 1 only when both of its inputs are 0."""


def nor_gate(input_1: int, input_2: int) -> int:
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
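# Added usage note: once registered on the transformers CLI parser, this
# subcommand is invoked from a shell, for example (illustrative invocation):
#     transformers-cli download bert-base-uncased --cache-dir ./models
# which pre-downloads both the model weights and the tokenizer files.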
from __future__ import annotations


def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """Solve the mass-action law n * p = ni**2 for whichever of the three
    concentrations is given as zero."""
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
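# Added worked example: by the mass-action law n * p = ni**2, so with
# electron_conc=25 and hole_conc=100 the intrinsic concentration is
# sqrt(25 * 100) = 50:
#     >>> carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#     ('intrinsic_conc', 50.0)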
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "A", "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
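# Added worked example: the conversion is a base-26 expansion with digits
# A=1 ... Z=26. For "AB", B contributes 2 * 26**0 and A contributes 1 * 26**1,
# so the result is 26 + 2 = 28:
#     >>> excel_title_to_column("AB")
#     28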
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""Feature extractor class for TVLT."""
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney").T

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        log_spec = spectrogram(waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0)
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = True, sampling_rate: Optional[int] = None, resample: bool = False, mask_audio: bool = False, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity of two sets (or lists/tuples)."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
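# Added worked example: for the sets above, the intersection {"c", "d", "e"}
# has 3 elements and the union has 8, so the Jaccard similarity is 3 / 8:
#     >>> jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"})
#     0.375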
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
    data_dir: str = field(default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
    max_seq_length: int = field(default=128, metadata={"help": "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded."})
    doc_stride: int = field(default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."})
    max_query_length: int = field(default=64, metadata={"help": "The maximum number of tokens for the question. Questions longer than this will be truncated to this length."})
    max_answer_length: int = field(default=30, metadata={"help": "The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another."})
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    version_2_with_negative: bool = field(default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
    null_score_diff_threshold: float = field(default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    n_best_size: int = field(default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    lang_id: int = field(default=0, metadata={"help": "language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"})
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None, dataset_format: Optional[str] = "pt"):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz starting at ``number`` for ``iterations`` rounds."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
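# Added example: playing from 1 through 7 produces (note the trailing space
# appended after every round):
#     >>> fizz_buzz(1, 7)
#     '1 2 Fizz 4 Buzz Fizz 7 '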
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    """Read a dataset built on the fly from a python generator function."""

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
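

# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Reading a small in-memory dataset from a generator might look like the
# following, assuming this class is importable as in the `datasets` library:
#
#   def gen():
#       yield {"text": "hello"}
#       yield {"text": "world"}
#
#   ds = GeneratorDatasetInputStream(generator=gen).read()
#   assert ds.num_rows == 2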
| 697 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_clip_fast'] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_clip'] = ['CLIPFeatureExtractor']
    _import_structure['image_processing_clip'] = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clip'] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_clip'] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_clip'] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
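
# --- Hedged note (added; not part of the original file) ---
# With the lazy module installed in sys.modules, attribute access triggers the
# real import only at first use, e.g. (assuming the usual package layout):
#
#   from transformers.models.clip import CLIPTextConfig  # resolved lazily here
#   config = CLIPTextConfig()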
| 408 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration `steps` times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each line segment by four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Draw the polyline described by the vectors with an equal aspect ratio."""
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
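
    # Hedged sanity checks (added for illustration; they run after the plot
    # window is closed): a 90-degree rotation of the unit x-vector gives the
    # unit y-vector, and one Koch step maps n points to 4n - 3 points.
    assert numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))
    assert len(iteration_step(INITIAL_VECTORS)) == 4 * len(INITIAL_VECTORS) - 3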
| 697 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol part (e.g. 's3://') from a dataset URI, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if the filesystem points to a remote location."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Move `src` to `dst`, using a plain `shutil.move` on local filesystems."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear references to fsspec's event loop, io thread and lock."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
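

# --- Hedged demo (added for illustration; not part of the original module) ---
if __name__ == "__main__":
    # extract_path_from_uri strips the protocol from remote URIs and leaves
    # local paths untouched.
    assert extract_path_from_uri("s3://my-bucket/my-dataset") == "my-bucket/my-dataset"
    assert extract_path_from_uri("relative/local/path") == "relative/local/path"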
| 502 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    """Check whether `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode blocks.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str) -> bool:
    """Return True if every character of `word` is a CJK character."""
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return False
    return True


def get_chinese_word(tokens: List[str]) -> List[str]:
    """Collect the multi-character, all-CJK words from a token list."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    """Prefix non-leading characters of whole LTP words with '##'."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    """For each line, list the positions of '##'-marked single CJK subwords."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
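
# Hedged worked example (added; traced by hand against add_sub_symbol above):
#   >>> add_sub_symbol(["中", "国", "人"], {"中国"})
#   ['中', '##国', '人']
# i.e. the second character of the LTP word "中国" is marked as a subword so
# that whole-word masking can later mask the word as a single unit.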
| 697 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
def __lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = self.get_config()
return config, pixel_values
def __lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
_a = TimmBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
_a = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
_a , _a = config_and_inputs
_a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
def __lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
_a = TimmBackboneModelTester(self )
_a = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
_a = '''resnet18'''
_a = '''microsoft/resnet-18'''
_a = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
_a = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
_a = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
_a = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def __lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def __lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def __lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def __lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(UpperCAmelCase__ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = self.has_attentions
# no need to test all models as different heads yield the same functionality
_a = self.all_model_classes[0]
_a = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
_a = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
_a = model(**UpperCAmelCase__ )
_a = outputs[0][-1]
# Encoder-/Decoder-only models
_a = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_a = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_a = copy.deepcopy(UpperCAmelCase__ )
_a = None
_a = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
_a = copy.deepcopy(UpperCAmelCase__ )
_a = False
_a = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a = model(**UpperCAmelCase__ )
| 22 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
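

# --- Hedged example (added; not part of the original file) ---
# create_token_type_ids_from_sequences marks [CLS] + sequence A + [SEP] with 0s
# and sequence B + [SEP] with 1s, so for two one-token sequences:
#
#   >>> tokenizer.create_token_type_ids_from_sequences([7], [8])
#   [0, 0, 0, 1, 1]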
| 697 | 0 |
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Simulate a quantum full adder and return counts over (carry_out, sum)."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
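
    # Hedged sanity check (added): 1 + 1 + 1 = 3 = 0b11, so every one of the
    # 1000 shots should measure the outcome '11' (carry bit, sum bit).
    assert set(quantum_full_adder(1, 1, 1)) == {"11"}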
| 382 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a LiLT model."""

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
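

# --- Hedged usage sketch (added; not part of the original file) ---
if __name__ == "__main__":
    # All arguments above are optional; unspecified fields keep their defaults.
    config = LiltConfig(channel_shrink_ratio=2)
    assert config.hidden_size == 768 and config.channel_shrink_ratio == 2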
| 697 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
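

# --- Hedged illustration (added; not part of the original file) ---
# A minimal concrete command implementing the ABC above. The argument wiring is
# a sketch: the real transformers CLI passes a subparsers action here rather
# than a bare parser.
class EchoCommand(BaseTransformersCLICommand):
    def __init__(self, text: str):
        self.text = text

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        parser.add_argument("--text", default="hello")

    def run(self):
        print(self.text)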
| 204 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve the maze with backtracking and print the path if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursively explore the maze from cell (i, j), marking the path taken."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
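
    # Hedged demo (added): 0 = open cell, 1 = wall; this 3x3 maze is solvable
    # along (0,0) -> (1,0) -> (1,1) -> (2,1) -> (2,2).
    demo_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    assert solve_maze(demo_maze)  # prints the 0/1 solution matrix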
| 697 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
def UpperCAmelCase__ ( self : Tuple):
super().setUp()
lowerCAmelCase_ : Any = [
BertTokenizer.from_pretrained(UpperCAmelCase__) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
lowerCAmelCase_ : List[Any] = [TFBertTokenizer.from_pretrained(UpperCAmelCase__) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(UpperCAmelCase__ , use_fast_bert_tokenizer=UpperCAmelCase__)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
lowerCAmelCase_ : Any = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
lowerCAmelCase_ : List[str] = list(zip(self.test_sentences , self.test_sentences[::-1]))
def UpperCAmelCase__ ( self : Any):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCAmelCase_ : Optional[Any] = tokenizer(UpperCAmelCase__ , return_tensors='''tf''' , padding='''longest''')
lowerCAmelCase_ : str = tf_tokenizer(UpperCAmelCase__)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key]))
@slow
def UpperCAmelCase__ ( self : int):
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : Optional[int] = tf_tokenizer(self.paired_sentences)
lowerCAmelCase_ : Union[str, Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key]))
@slow
def UpperCAmelCase__ ( self : List[str]):
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : Tuple = tf.function(UpperCAmelCase__)
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCAmelCase_ : Union[str, Any] = tf.constant(UpperCAmelCase__)
lowerCAmelCase_ : str = compiled_tokenizer(UpperCAmelCase__)
lowerCAmelCase_ : List[Any] = tf_tokenizer(UpperCAmelCase__)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def UpperCAmelCase__ ( self : Any):
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ : Optional[Any] = ModelToSave(tokenizer=UpperCAmelCase__)
lowerCAmelCase_ : List[str] = tf.convert_to_tensor(self.test_sentences)
lowerCAmelCase_ : Optional[Any] = model(UpperCAmelCase__) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCAmelCase_ : Tuple = Path(UpperCAmelCase__) / '''saved.model'''
model.save(UpperCAmelCase__)
lowerCAmelCase_ : Optional[Any] = tf.keras.models.load_model(UpperCAmelCase__)
lowerCAmelCase_ : Union[str, Any] = loaded_model(UpperCAmelCase__)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
| 171 |
'''simple docstring'''
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length between metric units, e.g. meters to kilometers."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
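
    # Hedged examples (added): 1 km is 1000 m; 4 m is 0.004 km.
    assert length_conversion(1, "kilometer", "meter") == 1000
    assert abs(length_conversion(4, "meter", "kilometer") - 0.004) < 1e-12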
| 697 | 0 |
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ... indefinitely."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
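

# Hedged example (added): the first Fibonacci number with three digits is
# F(12) = 144, so:
#   >>> solution(3)
#   12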
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 506 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
a_ = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
a_ = self.processor.tokenajson(UpperCAmelCase__ )
self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 697 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepare a list of random PIL images.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowercase = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
lowercase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(UpperCAmelCase__ , return_tensors='np' )
lowercase = processor(images=UpperCAmelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowercase = 'lower newer'
lowercase = processor(text=UpperCAmelCase__ )
lowercase = tokenizer(UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowercase = 'lower newer'
lowercase = self.prepare_image_inputs()
lowercase = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.batch_decode(UpperCAmelCase__ )
lowercase = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowercase = 'lower newer'
lowercase = self.prepare_image_inputs()
lowercase = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 84 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = TimmBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = TimmBackboneModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'resnet18'
a_ = 'microsoft/resnet-18'
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
| 697 | 0 |
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 351 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# with apply_OCR = True
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
a_ = Image.open(ds[0]['file'] ).convert('RGB' )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 697 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple: tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
    return flax_key_tuple, flax_tensor
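# Illustrative sketch (not part of the original script; the key and shape are
# made up): a 2D "kernel" falls through to the linear-layer branch, so
#   rename_base_flax_keys(("encoder", "layer_0", "kernel"), torch.ones(4, 3))
# returns (("encoder", "layer_0", "weight"), <tensor of shape (3, 4)>).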
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("""metadata""")
        curr_real_layer_name = """""".join(split_layer[0])[:-1]
        split_layer = [tuple(("""metadata""" + split_layer[1]).split("""/"""))]
    elif "kvstore" in layer:
        split_layer = layer.split("""kvstore""")
        curr_real_layer_name = """""".join(split_layer[0])[:-1]
        split_layer = [tuple(("""kvstore""" + split_layer[1]).split("""/"""))]
    else:
        split_layer = layer.split("""/""")
        curr_real_layer_name = """/""".join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = """file"""
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # PyTorch state dict keys use "." rather than the Flax "/" separator
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""", """rb""") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["""optimizer"""]["""target"""]
        checkpoint_info = flatten_dict(checkpoint_info, sep="""/""")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("""/""")), raw_weights)
        key = """/""".join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(""".bin""", F'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(""".bin""", F'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            """.bin""", F'-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin')
        temp_filename = os.path.join(dump_path, weights_name.replace(""".bin""", F'-{idx+1:05d}-of-???.bin'))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"""total_size""": total_size}
    index = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), """w""", encoding="""utf-8""") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + """\n"""
        f.write(content)
    return metadata, index
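# Back-of-the-envelope sketch of the size accounting above (illustrative
# numbers only): a float32 tensor with 1_000_000 elements contributes
# 1_000_000 * dtype_byte_size(torch.float32) = 4_000_000 bytes, so the default
# "10GB" budget starts a new shard roughly every 2_500 such tensors.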
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__magic_name__ = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""")
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""", device_map="""auto""")
    tokenizer = T5Tokenizer.from_pretrained("""t5-small""")
    text = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    input_ids = tokenizer(text, return_tensors="""pt""").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 665 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 1_0_0_0_1) -> int:
    """simple docstring"""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
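# Quick self-check sketch (hand-verifiable values): the sixth prime is 13.
if __name__ == "__main__":
    assert is_prime(11) and not is_prime(12)
    assert solution(6) == 13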
| 697 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , __a : int , __a : int=7 , __a : str=3 , __a : str=30 , __a : Union[str, Any]=4_00 , __a : Union[str, Any]=True , __a : Union[str, Any]=None , __a : int=True , __a : List[Any]=1 / 2_55 , __a : List[Any]=True , __a : int=[0.5, 0.5, 0.5] , __a : Union[str, Any]=[0.5, 0.5, 0.5] , __a : Any=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_a = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
_a = parent
_a = batch_size
_a = num_channels
_a = min_resolution
_a = max_resolution
_a = do_resize
_a = size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean
_a = image_std
_a = do_pad
def UpperCamelCase__ ( self : Optional[int] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def UpperCamelCase__ ( self : Any , __a : List[str] , __a : Any=False ):
if not batched:
_a = image_inputs[0]
if isinstance(UpperCAmelCase__ , Image.Image ):
_a , _a = image.size
else:
_a , _a = image.shape[1], image.shape[2]
if w < h:
_a = int(self.size["shortest_edge"] * h / w )
_a = self.size["shortest_edge"]
elif w > h:
_a = self.size["shortest_edge"]
_a = int(self.size["shortest_edge"] * w / h )
else:
_a = self.size["shortest_edge"]
_a = self.size["shortest_edge"]
else:
_a = []
for image in image_inputs:
_a , _a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_a = max(UpperCAmelCase__ , key=lambda __a : item[0] )[0]
_a = max(UpperCAmelCase__ , key=lambda __a : item[1] )[1]
return expected_height, expected_width
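# Worked example of the shortest-edge resize above (illustrative numbers):
# an image with h=400, w=300 and shortest_edge=18 maps to
# (h, w) -> (int(18 * 400 / 300), 18) = (24, 18), preserving aspect ratio.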
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =DetrImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self : int ):
_a = DetrImageProcessingTester(self )
@property
def UpperCamelCase__ ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self : str ):
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_rescale" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "rescale_factor" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_pad" ) )
def UpperCamelCase__ ( self : int ):
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
_a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase__ )
def UpperCamelCase__ ( self : Optional[int] ):
pass
def UpperCamelCase__ ( self : Tuple ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a , _a = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
_a = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self : str ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self : str ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a = image_processing(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase__ ( self : str ):
# prepare image and target
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
_a = json.loads(f.read() )
_a = {"image_id": 3_97_69, "annotations": target}
# encode them
_a = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
_a = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors="pt" )
# verify pixel values
_a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase__ )
_a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
# verify area
_a = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase__ ) )
# verify boxes
_a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase__ )
_a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase__ , atol=1e-3 ) )
# verify image_id
_a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase__ ) )
# verify is_crowd
_a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase__ ) )
# verify class_labels
_a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase__ ) )
# verify orig_size
_a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase__ ) )
# verify size
_a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase__ ) )
@slow
def UpperCamelCase__ ( self : Dict ):
# prepare image, target and masks_path
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
_a = json.loads(f.read() )
_a = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
_a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
_a = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
_a = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors="pt" )
# verify pixel values
_a = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , UpperCAmelCase__ )
_a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
# verify area
_a = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCAmelCase__ ) )
# verify boxes
_a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCAmelCase__ )
_a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCAmelCase__ , atol=1e-3 ) )
# verify image_id
_a = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCAmelCase__ ) )
# verify is_crowd
_a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCAmelCase__ ) )
# verify class_labels
_a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCAmelCase__ ) )
# verify masks
_a = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCAmelCase__ )
# verify orig_size
_a = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCAmelCase__ ) )
# verify size
_a = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCAmelCase__ ) )
| 692 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = 10
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = [1, 2, 3, 4]
a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = ''
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
a_ , a_ = process_story(UpperCAmelCase__ )
a_ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = ['It was the best of times.']
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = torch.tensor([1, 2, 3, 4] )
a_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = 101
a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
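# Illustrative reading of the fixture above: segment ids toggle between 1 and
# 0 at every separator token (101), the separator included, e.g.
# [1, 2, 3, 101, 5, 6] yields [1, 1, 1, 0, 0, 0].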
| 697 | 0 |
from math import factorial
def solution(num: int = 100) -> int:
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
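# Quick check (hand-verifiable): solution(10) == 27, since 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.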
| 408 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class _snake_case ( unittest.TestCase , snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = load_tool('text-question-answering' )
self.tool.setup()
a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
| 697 | 0 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''')
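# Sanity note (based on the well-known answer to this dice puzzle,
# Project Euler 205): solution() is expected to return 0.5731441.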
| 502 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =[
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
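# Usage sketch of the lazy module above (hypothetical caller): attribute
# access triggers the real import, so
#   from transformers.models.focalnet import FocalNetConfig
# loads only the configuration module, keeping the torch-dependent modeling
# code out of the import path until a modeling class is requested.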
| 697 | 0 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_snake_case : Tuple = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    '''simple docstring'''
    global process_lock
    # we perform 10 swap passes (hard-coded to match the 10-element demo list
    # in main); after n passes over n elements the list is guaranteed sorted.
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    '''simple docstring'''
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    '''simple docstring'''
    arr = list(range(10, 0, -1))
    print('''Initial List''')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('''Sorted List\n''')
    print(*arr)
if __name__ == "__main__":
main()
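# Usage sketch on a custom input (assumes the helpers above; oe_process runs a
# fixed 10 passes, so only lists of length 10 are guaranteed fully sorted):
#   odd_even_transposition([4, 3, 2, 1, 10, 9, 8, 7, 6, 5])  # -> [1, 2, ..., 10]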
| 22 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "vit"
def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
super().__init__(**UpperCAmelCase__ )
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = qkv_bias
a_ = encoder_stride
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-4
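# Worked patch-grid arithmetic for the defaults above: a 224x224 image cut
# into 16x16 patches yields (224 // 16) ** 2 = 196 patch tokens (plus one CLS
# token) as the transformer's input sequence.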
| 697 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
@property
def _lowerCamelCase ( self: int ) -> int:
torch.manual_seed(0 )
__UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def _lowerCamelCase ( self: List[str] ) -> int:
__UpperCAmelCase : Tuple = self.dummy_uncond_unet
__UpperCAmelCase : List[Any] = PNDMScheduler()
__UpperCAmelCase : Union[str, Any] = PNDMPipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pndm.to(UpperCAmelCase__ )
pndm.set_progress_bar_config(disable=UpperCAmelCase__ )
__UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
__UpperCAmelCase : str = pndm(generator=UpperCAmelCase__ , num_inference_steps=20 , output_type="numpy" ).images
__UpperCAmelCase : Tuple = torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = pndm(generator=UpperCAmelCase__ , num_inference_steps=20 , output_type="numpy" , return_dict=UpperCAmelCase__ )[0]
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : Optional[Any] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
def _lowerCamelCase ( self: int ) -> int:
__UpperCAmelCase : Optional[Any] = "google/ddpm-cifar10-32"
__UpperCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(UpperCAmelCase__ )
__UpperCAmelCase : Dict = PNDMScheduler()
__UpperCAmelCase : Any = PNDMPipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pndm.to(UpperCAmelCase__ )
pndm.set_progress_bar_config(disable=UpperCAmelCase__ )
__UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
__UpperCAmelCase : List[str] = pndm(generator=UpperCAmelCase__ , output_type="numpy" ).images
__UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCAmelCase : List[str] = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 382 |
'''simple docstring'''
def solution(length: int = 5_0) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
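# Hand-checkable example for the recurrence above: a row of length 7 admits
# exactly 17 valid arrangements (the block-free row included).
if __name__ == "__main__":
    assert solution(7) == 17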
| 697 | 0 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__lowerCamelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def UpperCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Any=False , ):
output_path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_UpperCAmelCase , _UpperCAmelCase , f=output_path.as_posix() , input_names=_UpperCAmelCase , output_names=_UpperCAmelCase , dynamic_axes=_UpperCAmelCase , do_constant_folding=_UpperCAmelCase , use_external_data_format=_UpperCAmelCase , enable_onnx_checker=_UpperCAmelCase , opset_version=_UpperCAmelCase , )
else:
export(
_UpperCAmelCase , _UpperCAmelCase , f=output_path.as_posix() , input_names=_UpperCAmelCase , output_names=_UpperCAmelCase , dynamic_axes=_UpperCAmelCase , do_constant_folding=_UpperCAmelCase , opset_version=_UpperCAmelCase , )
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Tuple = False ):
snake_case : Any = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
snake_case : Union[str, Any] = "cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
else:
snake_case : Optional[Any] = "cpu"
snake_case : Optional[Any] = Path(_UpperCAmelCase )
# VAE DECODER
snake_case : Any = AutoencoderKL.from_pretrained(model_path + "/vae" )
snake_case : Dict = vae_decoder.config.latent_channels
# forward only through the decoder part
snake_case : Union[str, Any] = vae_decoder.decode
onnx_export(
_UpperCAmelCase , model_args=(
torch.randn(1 , _UpperCAmelCase , 25 , 25 ).to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ),
False,
) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=_UpperCAmelCase , )
del vae_decoder
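# Sizing note (assumption: the standard 8x VAE downsampling factor in Stable
# Diffusion): the dummy 25x25 latent above traces a 200x200-pixel decode, but
# the exported graph keeps batch, height and width dynamic via dynamic_axes.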
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
__lowerCamelCase = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("""SD: Done: ONNX""")
| 204 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0)
def main() -> None:
"""simple docstring"""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
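# Truth-table check sketch: only the (0, 0) input is high, i.e.
# [nor_gate(a, b) for a in (0, 1) for b in (0, 1)] == [1, 0, 0, 0].
if __name__ == "__main__":
    assert [nor_gate(a, b) for a in (0, 1) for b in (0, 1)] == [1, 0, 0, 0]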
| 697 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
A__ : int = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
A__ : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
A__ : Any = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
A__ : List[Any] = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
A__ : Union[str, Any] = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
A__ : List[str] = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
A__ : Optional[Any] = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def UpperCamelCase( ):
lowerCAmelCase_ , lowerCAmelCase_ : Any = randrange(len(_UpperCAmelCase ) ), randrange(len(_UpperCAmelCase ) )
lowerCAmelCase_ : Optional[Any] = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def UpperCamelCase( __UpperCamelCase : Dict = 100 ):
return (generate_random_hand() for _ in range(_UpperCAmelCase ))
@pytest.mark.parametrize('''hand, expected''' ,_UpperCAmelCase )
def UpperCamelCase( __UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ):
assert PokerHand(_UpperCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' ,_UpperCAmelCase )
def UpperCamelCase( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Any ):
assert PokerHand(_UpperCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' ,_UpperCAmelCase )
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : str ):
lowerCAmelCase_ : int = PokerHand(_UpperCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' ,_UpperCAmelCase )
def UpperCamelCase( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str] ):
assert PokerHand(_UpperCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' ,_UpperCAmelCase )
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ):
assert PokerHand(_UpperCAmelCase )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' ,_UpperCAmelCase )
def UpperCamelCase( __UpperCamelCase : List[Any] ,__UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ):
assert PokerHand(_UpperCAmelCase ).compare_with(PokerHand(_UpperCAmelCase ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' ,generate_random_hands() )
def UpperCamelCase( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ,__UpperCamelCase : Any ):
assert PokerHand(_UpperCAmelCase ).compare_with(PokerHand(_UpperCAmelCase ) ) == expected
def UpperCamelCase( ):
lowerCAmelCase_ : Dict = [PokerHand(_UpperCAmelCase ) for hand in SORTED_HANDS]
lowerCAmelCase_ : Optional[Any] = poker_hands.copy()
shuffle(_UpperCAmelCase )
lowerCAmelCase_ : str = chain(sorted(_UpperCAmelCase ) )
for index, hand in enumerate(_UpperCAmelCase ):
assert hand == poker_hands[index]
def UpperCamelCase( ):
lowerCAmelCase_ : int = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
pokerhands.sort(reverse=_UpperCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def UpperCamelCase( ):
lowerCAmelCase_ : Tuple = PokerHand('''2C 4S AS 3D 5C''' )
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : int = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def UpperCamelCase( ):
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : Tuple = os.path.abspath(os.path.dirname(_UpperCAmelCase ) )
lowerCAmelCase_ : List[str] = os.path.join(_UpperCAmelCase ,'''poker_hands.txt''' )
with open(_UpperCAmelCase ) as file_hand:
for line in file_hand:
lowerCAmelCase_ : Tuple = line[:14].strip()
lowerCAmelCase_ : Union[str, Any] = line[15:].strip()
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = PokerHand(_UpperCAmelCase ), PokerHand(_UpperCAmelCase )
lowerCAmelCase_ : Tuple = player.compare_with(_UpperCAmelCase )
if output == "Win":
answer += 1
assert answer == 376
| 171 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class _snake_case ( snake_case ):
"""simple docstring"""
@staticmethod
def __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ) -> str:
a_ = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=UpperCAmelCase__ , help='Name of the model to download' )
download_parser.set_defaults(func=UpperCAmelCase__ )
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = model
a_ = cache
a_ = force
a_ = trust_remote_code
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
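# CLI sketch (invocation shape assumed from the argparse wiring above):
#   transformers-cli download bert-base-uncased --cache-dir /tmp/models
# pre-fetches both the model weights and the tokenizer files registered above.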
| 697 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
a__ =None
a__ =BloomTokenizerFast
a__ =BloomTokenizerFast
a__ =True
a__ =False
a__ ='''tokenizer_file'''
a__ ={'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def __lowerCAmelCase ( self ) -> List[Any]:
super().setUp()
_UpperCAmelCase : Any = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **A ) -> Any:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def __lowerCAmelCase ( self ) -> List[Any]:
_UpperCAmelCase : Tuple = self.get_rust_tokenizer()
_UpperCAmelCase : Any = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
_UpperCAmelCase : Any = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
_UpperCAmelCase : Union[str, Any] = tokenizer.batch_encode_plus(UpperCAmelCase__ )['''input_ids''']
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_UpperCAmelCase : List[str] = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __lowerCAmelCase ( self , A=6 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_UpperCAmelCase : Dict = '''This is a simple input'''
_UpperCAmelCase : Any = ['''This is a simple input 1''', '''This is a simple input 2''']
_UpperCAmelCase : Optional[int] = ('''This is a simple input''', '''This is a pair''')
_UpperCAmelCase : List[str] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.encode(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
tokenizer_r.batch_encode_plus(UpperCAmelCase__ , max_length=UpperCAmelCase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
_UpperCAmelCase : Any = None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(UpperCAmelCase__ , tokenizer_r.encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
UpperCAmelCase__ , tokenizer_r.batch_encode_plus , UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding='''max_length''' , )
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = self.get_rust_tokenizer()
_UpperCAmelCase : List[Any] = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=UpperCAmelCase__ )
_UpperCAmelCase : Optional[Any] = next(iter(UpperCAmelCase__ ) )['''premise'''] # pick up one data
_UpperCAmelCase : List[str] = list(sample_data.values() )
_UpperCAmelCase : Optional[int] = list(map(tokenizer.encode , UpperCAmelCase__ ) )
_UpperCAmelCase : Tuple = [tokenizer.decode(UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __lowerCAmelCase ( self ) -> Any:
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 506 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
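    # Added example (not in the original file): "AB" = 1 * 26 + 2 = 28.
    print(excel_title_to_column("AB"))  # 28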
| 697 | 0 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A_ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    _UpperCamelCase : Optional[Any] = IFImg2ImgSuperResolutionPipeline
_UpperCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
_UpperCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def SCREAMING_SNAKE_CASE__ ( self ):
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case=0 ):
if str(UpperCAmelCase__ ).startswith('mps' ):
lowercase = torch.manual_seed(UpperCAmelCase__ )
else:
lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
lowercase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def SCREAMING_SNAKE_CASE__ ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def SCREAMING_SNAKE_CASE__ ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def SCREAMING_SNAKE_CASE__ ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 84 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCAmelCase =logging.get_logger(__name__)
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = ["audio_values", "audio_mask"]
def __init__( self , UpperCAmelCase__=2048 , UpperCAmelCase__=1 , UpperCAmelCase__=[16, 16] , UpperCAmelCase__=128 , UpperCAmelCase__=4_4100 , UpperCAmelCase__=86 , UpperCAmelCase__=2048 , UpperCAmelCase__=0.0 , **UpperCAmelCase__ , ) -> Union[str, Any]:
super().__init__(
feature_size=UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , padding_value=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = spectrogram_length
a_ = num_channels
a_ = patch_size
a_ = feature_size // self.patch_size[1]
a_ = n_fft
a_ = sampling_rate // hop_length_to_sampling_rate
a_ = sampling_rate
a_ = padding_value
a_ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCAmelCase__ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCAmelCase__ , norm='slaney' , mel_scale='slaney' , ).T
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ ) -> np.ndarray:
a_ = spectrogram(
UpperCAmelCase__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
a_ = log_spec[:, :-1]
a_ = log_spec - 2_0.0
a_ = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = True , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , **UpperCAmelCase__ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
a_ = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
a_ = is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
        if is_batched:
            a_ = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
            a_ = np.asarray(UpperCAmelCase__ , dtype=np.float32 )
        elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            a_ = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            a_ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
a_ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , UpperCAmelCase__ ):
            a_ = [np.asarray(UpperCAmelCase__ , dtype=np.float32 ) for feature in audio_features]
# Create audio attention mask
a_ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
a_ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
            a_ = np.array(UpperCAmelCase__ ).astype(np.float32 )
        # convert into correct format for padding
        a_ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        a_ = np.ones([len(UpperCAmelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
a_ = padded_audio_features * self.padding_value
for i in range(len(UpperCAmelCase__ ) ):
a_ = audio_features[i]
a_ = feature
# return as BatchFeature
if return_attention_mask:
a_ = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
a_ = {'audio_values': padded_audio_features}
a_ = BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
return encoded_inputs
| 697 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class snake_case_ ( __UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
snake_case__ = AlbertTokenizer
snake_case__ = AlbertTokenizerFast
snake_case__ = True
snake_case__ = True
snake_case__ = True
def UpperCAmelCase__ (self: Dict ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a : Dict = AlbertTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ (self: List[str] , __UpperCAmelCase: int ) -> Optional[int]:
'''simple docstring'''
__a : Union[str, Any] = "this is a test"
__a : Any = "this is a test"
return input_text, output_text
def UpperCAmelCase__ (self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__a : List[str] = "<pad>"
__a : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def UpperCAmelCase__ (self: Union[str, Any] ) -> str:
'''simple docstring'''
__a : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "▁eloquent" )
self.assertEqual(len(UpperCAmelCase__ ) , 30000 )
def UpperCAmelCase__ (self: Tuple ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def UpperCAmelCase__ (self: Dict ) -> Union[str, Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__a : List[Any] = self.get_tokenizer()
__a : Optional[Any] = self.get_rust_tokenizer()
__a : Dict = "I was born in 92000, and this is falsé."
__a : Any = tokenizer.tokenize(UpperCAmelCase__ )
__a : Dict = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__a : str = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
__a : Optional[int] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__a : Optional[Any] = self.get_rust_tokenizer()
__a : Optional[int] = tokenizer.encode(UpperCAmelCase__ )
__a : str = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase__ (self: Tuple ) -> str:
'''simple docstring'''
__a : Union[str, Any] = AlbertTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
__a : int = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCAmelCase__ , ["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [48, 25, 21, 1289] )
__a : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCAmelCase__ , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
__a : Any = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
__a : int = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def UpperCAmelCase__ (self: Union[str, Any] ) -> Tuple:
'''simple docstring'''
__a : Union[str, Any] = AlbertTokenizer(UpperCAmelCase__ )
__a : Dict = tokenizer.encode("sequence builders" )
__a : Dict = tokenizer.encode("multi-sequence build" )
__a : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
__a : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCAmelCase__ (self: Union[str, Any] ) -> Tuple:
'''simple docstring'''
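        # fmt: off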
__a : Tuple = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 351 |
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """simple docstring"""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
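    # Added example: with alternative_union=True the denominator is |A| + |B|.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11 = 0.2727...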
| 697 | 0 |
'''simple docstring'''
def jaro_winkler(str_a: str, str_b: str) -> float:
    def get_matched_characters(_stra: str, _strb: str) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                _strb = f'{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}'
        return "".join(matched)
    # matching characters
    matching_a = get_matched_characters(str_a, str_b)
    matching_b = get_matched_characters(str_b, str_a)
    match_count = len(matching_a)
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_a)
                + match_count / len(str_b)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str_a[:4], str_b[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
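    # Added example: the classic transposition pair from the literature.
    print(jaro_winkler('martha', 'marhta'))  # 0.9611111111111111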
| 665 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers' )
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            'starting number must be\n an integer and be more than 0' )
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
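    # Added example: the classic 1..15 round.
    print(fizz_buzz(1, 15))  # "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "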
| 697 | 0 |
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128) )
    return img.point(contrast )
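# Note (added): the (259, 255) constants implement the standard contrast-correction
# factor; level = 0 leaves the image unchanged and the curve pivots around the
# mid-gray value 128.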
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
lowerCAmelCase_ : Tuple = change_contrast(img, 1_70)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 692 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _snake_case ( snake_case ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> str:
super().__init__(
features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , num_proc=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = Generator(
cache_dir=UpperCAmelCase__ , features=UpperCAmelCase__ , generator=UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , )
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
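# Illustrative usage sketch (added; names are assumptions - upstream exports this
# reader as GeneratorDatasetInputStream in `datasets.io.generator`):
#
#     def gen():
#         yield {"text": "hello"}
#
#     ds = GeneratorDatasetInputStream(generator=gen).read()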
| 697 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case=13 , _snake_case=3 , _snake_case=224 , _snake_case=30 , _snake_case=400 , _snake_case=True , _snake_case=None , _snake_case=True , _snake_case=[0.5, 0.5, 0.5] , _snake_case=[0.5, 0.5, 0.5] , ):
_UpperCAmelCase =size if size is not None else {"height": 18, "width": 18}
_UpperCAmelCase =parent
_UpperCAmelCase =batch_size
_UpperCAmelCase =num_channels
_UpperCAmelCase =image_size
_UpperCAmelCase =min_resolution
_UpperCAmelCase =max_resolution
_UpperCAmelCase =do_resize
_UpperCAmelCase =size
_UpperCAmelCase =do_normalize
_UpperCAmelCase =image_mean
_UpperCAmelCase =image_std
def SCREAMING_SNAKE_CASE ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _a ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case =ViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =EfficientFormerImageProcessorTester(self )
@property
def SCREAMING_SNAKE_CASE ( self ):
return self.image_proc_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase__ , "size" ) )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
# Initialize image_processor
_UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase =prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase =image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCAmelCase =image_processor(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def SCREAMING_SNAKE_CASE ( self ):
# Initialize image_processor
_UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase =prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
_UpperCAmelCase =image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCAmelCase =image_processor(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def SCREAMING_SNAKE_CASE ( self ):
# Initialize image_processor
_UpperCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase =prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase =image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCAmelCase =image_processor(UpperCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 408 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 697 | 0 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
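    # Added example: slowsort sorts in place.
    data = [5, 3, 1, 4, 2]
    slowsort(data)
    print(data)  # [1, 2, 3, 4, 5]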
| 502 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def a ( _UpperCAmelCase ) -> int:
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def a ( _UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
for char in word:
a_ = ord(_UpperCAmelCase )
if not _is_chinese_char(_UpperCAmelCase ):
return 0
return 1
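# Note (added): is_chinese() intentionally returns 0/1 ints because the values are
# used as flags downstream; a word counts as Chinese only if every character falls
# inside one of the CJK Unicode blocks checked above.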
def a ( _UpperCAmelCase ) -> Tuple:
"""simple docstring"""
a_ = set()
for token in tokens:
a_ = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase )
if chinese_word:
word_set.add(_UpperCAmelCase )
a_ = list(_UpperCAmelCase )
return word_list
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
a_ = max([len(_UpperCAmelCase ) for w in chinese_word_set] )
a_ = bert_tokens
a_ , a_ = 0, len(_UpperCAmelCase )
while start < end:
a_ = True
if is_chinese(bert_word[start] ):
a_ = min(end - start , _UpperCAmelCase )
for i in range(_UpperCAmelCase , 1 , -1 ):
a_ = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
a_ = '##' + bert_word[j]
a_ = start + i
a_ = False
break
if single_word:
start += 1
return bert_word
def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
"""simple docstring"""
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws
a_ = [get_chinese_word(_UpperCAmelCase ) for r in res]
ltp_res.extend(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
a_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
a_ = []
for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ):
a_ = []
for id in input_ids:
a_ = bert_tokenizer._convert_id_to_token(_UpperCAmelCase )
input_tokens.append(_UpperCAmelCase )
a_ = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase )
a_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_UpperCAmelCase ):
if token[:2] == "##":
a_ = token[2:]
# save chinese tokens' pos
if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ):
ref_id.append(_UpperCAmelCase )
ref_ids.append(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
return ref_ids
def a ( _UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
a_ = f.readlines()
a_ = [line.strip() for line in data if len(_UpperCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
a_ = LTP(args.ltp ) # faster in GPU device
a_ = BertTokenizer.from_pretrained(args.bert )
a_ = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
a_ = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids]
f.writelines(_UpperCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase =argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
__lowerCAmelCase =parser.parse_args()
main(args)
| 697 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_snake_case : Dict = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A ( _a ):
lowercase_ = ['pixel_values']
def __init__( self : str , lowerCAmelCase_ : List[str] = True , lowerCAmelCase_ : Union[str, Any] = None , lowerCAmelCase_ : Tuple = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[int] = True , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict = True , lowerCAmelCase_ : str = 1 / 2_55 , lowerCAmelCase_ : List[Any] = True , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : Dict = True , **lowerCAmelCase_ : Any , ) -> None:
"""simple docstring"""
super().__init__(**UpperCAmelCase__ )
_a = size if size is not None else {'''shortest_edge''': 2_24}
_a = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
_a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_a = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ , param_name='''crop_size''' )
_a = do_resize
_a = size
_a = resample
_a = do_center_crop
_a = crop_size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_a = image_std if image_std is not None else OPENAI_CLIP_STD
_a = do_convert_rgb
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Any = PILImageResampling.BICUBIC , lowerCAmelCase_ : Union[str, Any] = None , **lowerCAmelCase_ : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_a = get_resize_output_image_size(UpperCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
_a = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(UpperCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple = None , **lowerCAmelCase_ : str , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Tuple = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Tuple = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : List[str] = ChannelDimension.FIRST , **lowerCAmelCase_ : Optional[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
_a = do_resize if do_resize is not None else self.do_resize
_a = size if size is not None else self.size
_a = get_size_dict(UpperCAmelCase__ , param_name='''size''' , default_to_square=UpperCAmelCase__ )
_a = resample if resample is not None else self.resample
_a = do_center_crop if do_center_crop is not None else self.do_center_crop
_a = crop_size if crop_size is not None else self.crop_size
_a = get_size_dict(UpperCAmelCase__ , param_name='''crop_size''' , default_to_square=UpperCAmelCase__ )
_a = do_rescale if do_rescale is not None else self.do_rescale
_a = rescale_factor if rescale_factor is not None else self.rescale_factor
_a = do_normalize if do_normalize is not None else self.do_normalize
_a = image_mean if image_mean is not None else self.image_mean
_a = image_std if image_std is not None else self.image_std
_a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_a = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_a = [convert_to_rgb(UpperCAmelCase__ ) for image in images]
# All transformations expect numpy arrays.
_a = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
_a = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
_a = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
_a = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
_a = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
_a = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
_a = {'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
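# Illustrative usage sketch (added; `A` is this file's placeholder class name, and
# the shapes assume the default 224x224 crop configured above):
#
#     processor = A()
#     batch = processor(images=[image], return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224)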
| 22 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase ={
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__lowerCAmelCase ={
"yjernite/retribert-base-uncased": 512,
}
__lowerCAmelCase ={
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = RetriBertTokenizer
_UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__="[UNK]" , UpperCAmelCase__="[SEP]" , UpperCAmelCase__="[PAD]" , UpperCAmelCase__="[CLS]" , UpperCAmelCase__="[MASK]" , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ) -> int:
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )
a_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars
):
a_ = getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) )
a_ = do_lower_case
a_ = strip_accents
a_ = tokenize_chinese_chars
a_ = normalizer_class(**UpperCAmelCase__ )
a_ = do_lower_case
    def __SCREAMING_SNAKE_CASE ( self , token_ids_a , token_ids_b=None ) -> str:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def __SCREAMING_SNAKE_CASE ( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]:
a_ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
| 697 | 0 |
g = 9.80665
def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density" )
    if volume < 0:
        raise ValueError("Impossible Object volume" )
    if gravity <= 0:
        raise ValueError("Impossible Gravity" )
    return fluid_density * gravity * volume
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
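    # Added example: buoyant force on 0.5 m^3 fully submerged in fresh water (997 kg/m^3).
    print(archimedes_principle(fluid_density=997, volume=0.5))  # ~4888.6 (newtons)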
| 382 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "lilt"
def __init__( self , UpperCAmelCase__=3_0522 , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=512 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0 , UpperCAmelCase__="absolute" , UpperCAmelCase__=None , UpperCAmelCase__=4 , UpperCAmelCase__=1024 , **UpperCAmelCase__ , ) -> Optional[Any]:
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = classifier_dropout
a_ = channel_shrink_ratio
        a_ = max_2d_position_embeddings
| 697 | 0 |
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(binary_insertion_sort(unsorted))
| 204 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """simple docstring"""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
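    # Added example: 0 = open cell, 1 = blocked; prints the path matrix if solvable.
    demo_maze = [
        [0, 1],
        [0, 0],
    ]
    solve_maze(demo_maze)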
| 697 | 0 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 171 |
'''simple docstring'''
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """simple docstring"""
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'from_type\' value: {from_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'to_type\' value: {to_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
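    # Added example: 4 meters expressed in kilometers.
    print(length_conversion(4, "meter", "kilometer"))  # 0.004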
| 697 | 0 |
"""simple docstring"""
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
    # read original image
    img = imread(R'../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('gaussian filter with 3x3 mask', gaussian3x3)
    imshow('gaussian filter with 5x5 mask', gaussian5x5)
    waitKey()
| 506 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
a_ = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
        a_ = self.processor.token2json(UpperCAmelCase__ )
self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
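# Note (added): DonutProcessor.token2json inverts the XML-like tag serialization
# above back into a (possibly nested) dict, e.g. "<s_name>John Doe</s_name>" ->
# {"name": "John Doe"}.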
| 697 | 0 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
UpperCAmelCase = '''hf-internal-testing/tiny-random-bert'''
UpperCAmelCase = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
UpperCAmelCase = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class A_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = cached_file(UpperCAmelCase__ , UpperCAmelCase__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCAmelCase__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) ) )
with open(os.path.join(UpperCAmelCase__ , 'refs' , 'main' ) ) as f:
lowercase = f.read()
self.assertEqual(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , 'snapshots' , UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertTrue(os.path.isfile(UpperCAmelCase__ ) )
# File is cached at the same place the second time.
lowercase = cached_file(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Using a specific revision to test the full commit hash.
lowercase = cached_file(UpperCAmelCase__ , UpperCAmelCase__ , revision='9b8c223' )
self.assertEqual(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , 'snapshots' , UpperCAmelCase__ , UpperCAmelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( self ):
with self.assertRaisesRegex(UpperCAmelCase__ , 'is not a valid model identifier' ):
lowercase = cached_file('tiny-random-bert' , UpperCAmelCase__ )
with self.assertRaisesRegex(UpperCAmelCase__ , 'is not a valid git identifier' ):
lowercase = cached_file(UpperCAmelCase__ , UpperCAmelCase__ , revision='aaaa' )
with self.assertRaisesRegex(UpperCAmelCase__ , 'does not appear to have a file named' ):
lowercase = cached_file(UpperCAmelCase__ , 'conf' )
def SCREAMING_SNAKE_CASE__ ( self ):
with self.assertRaisesRegex(UpperCAmelCase__ , 'does not appear to have a file named' ):
lowercase = cached_file(UpperCAmelCase__ , 'conf' )
with open(os.path.join(UpperCAmelCase__ , 'refs' , 'main' ) ) as f:
lowercase = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '.no_exist' , UpperCAmelCase__ , 'conf' ) ) )
lowercase = cached_file(UpperCAmelCase__ , 'conf' , _raise_exceptions_for_missing_entries=UpperCAmelCase__ )
self.assertIsNone(UpperCAmelCase__ )
lowercase = cached_file(UpperCAmelCase__ , 'conf' , local_files_only=UpperCAmelCase__ , _raise_exceptions_for_missing_entries=UpperCAmelCase__ )
self.assertIsNone(UpperCAmelCase__ )
lowercase = mock.Mock()
lowercase = 500
lowercase = {}
lowercase = HTTPError
lowercase = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=UpperCAmelCase__ ) as mock_head:
lowercase = cached_file(UpperCAmelCase__ , 'conf' , _raise_exceptions_for_connection_errors=UpperCAmelCase__ )
self.assertIsNone(UpperCAmelCase__ )
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE__ ( self ):
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , FLAX_WEIGHTS_NAME ) )
def SCREAMING_SNAKE_CASE__ ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCAmelCase__ , 'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' , UpperCAmelCase__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCAmelCase__ , 'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' , UpperCAmelCase__ , revision='ahaha' )
lowercase = get_file_from_repo('bert-base-cased' , UpperCAmelCase__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase = json.loads(open(UpperCAmelCase__ , 'r' ).read() )
self.assertEqual(config['hidden_size'] , 768 )
def SCREAMING_SNAKE_CASE__ ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = Path(UpperCAmelCase__ ) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(UpperCAmelCase__ , 'a.txt' ) , str(UpperCAmelCase__ ) )
self.assertIsNone(get_file_from_repo(UpperCAmelCase__ , 'b.txt' ) )
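# A hedged usage sketch of the caching behaviour exercised above. The repo id
# is illustrative, and the underscore-prefixed kwargs are the private escape
# hatches used by the tests, not stable public API.
from transformers.utils import cached_file

# The first call downloads into the local cache; a second identical call
# resolves to the same snapshot path without re-downloading.
resolved = cached_file('hf-internal-testing/tiny-random-bert', 'config.json')
assert resolved == cached_file('hf-internal-testing/tiny-random-bert', 'config.json')
# A missing file returns None instead of raising when the escape hatch is set.
missing = cached_file(
    'hf-internal-testing/tiny-random-bert',
    'does-not-exist.txt',
    _raise_exceptions_for_missing_entries=False,
)
assert missing is None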
| 84 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]:
a_ = parent
a_ = out_indices if out_indices is not None else [4]
a_ = stage_names
a_ = out_features
a_ = backbone
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = use_pretrained_backbone
a_ = is_training
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = TimmBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
_UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = TimmBackboneModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'resnet18'
a_ = 'microsoft/resnet-18'
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
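# Hedged sketch of the out_indices convention checked above: a timm-loaded
# backbone defaults to (-1,), while a native transformers backbone defaults to
# the index of its last stage. The checkpoints mirror the ones in the test.
from transformers import AutoBackbone

timm_backbone = AutoBackbone.from_pretrained('resnet18', use_timm_backbone=True)
hf_backbone = AutoBackbone.from_pretrained('microsoft/resnet-18')
print(timm_backbone.out_indices)  # (-1,)
print(hf_backbone.out_indices)    # [len(hf_backbone.stage_names) - 1]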
| 697 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class snake_case_ ( __UpperCamelCase ):
"""simple docstring"""
def __init__(self: Tuple , *__UpperCAmelCase: Tuple , **__UpperCAmelCase: int ) -> None:
'''simple docstring'''
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead." , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
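# The file above is an instance of a common deprecation-alias pattern: keep the
# old name importable, warn on construction, and delegate everything to the
# replacement. A generic, self-contained sketch (all names are illustrative):
import warnings


class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'OldFeatureExtractor is deprecated; use NewImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)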
| 351 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
        # with apply_ocr = True
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
a_ = Image.open(ds[0]['file'] ).convert('RGB' )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
        # with apply_ocr = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 697 | 0 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ):
'''simple docstring'''
super().__init__()
A_ : Any = nn.Linear(3 ,4 )
A_ : List[Any] = nn.BatchNormad(4 )
A_ : str = nn.Linear(4 ,5 )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _a ( self : Optional[Any] ,_a : List[str] ,*_a : str ,**_a : Optional[Any] ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _a ( self : str ,_a : Tuple ,_a : Union[str, Any] ):
'''simple docstring'''
return output + 1
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : str ):
'''simple docstring'''
A_ : Optional[Any] = ModelForTest()
A_ : Dict = ModelHook()
add_hook_to_module(UpperCAmelCase__ ,UpperCAmelCase__ )
self.assertEqual(test_model._hf_hook ,UpperCAmelCase__ )
self.assertTrue(hasattr(UpperCAmelCase__ ,"""_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ ,"""forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ ,"""_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ ,"""_old_forward""" ) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Tuple = ModelForTest()
A_ : Union[str, Any] = ModelHook()
add_hook_to_module(UpperCAmelCase__ ,UpperCAmelCase__ )
add_hook_to_module(UpperCAmelCase__ ,UpperCAmelCase__ ,append=UpperCAmelCase__ )
self.assertEqual(isinstance(test_model._hf_hook ,UpperCAmelCase__ ) ,UpperCAmelCase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) ,2 )
self.assertTrue(hasattr(UpperCAmelCase__ ,"""_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ ,"""forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ ,"""_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ ,"""_old_forward""" ) )
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[Any] = ModelForTest()
A_ : Any = torch.randn(2 ,3 )
A_ : Optional[int] = test_model(x + 1 )
A_ : Optional[Any] = test_model(x + 2 )
A_ : int = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ ,UpperCAmelCase__ )
A_ : Tuple = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ ,UpperCAmelCase__ ,atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
A_ : Optional[int] = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ ,UpperCAmelCase__ )
A_ : Any = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ ,UpperCAmelCase__ ,atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A_ : int = SequentialHook(PreForwardHook() ,PreForwardHook() )
add_hook_to_module(UpperCAmelCase__ ,UpperCAmelCase__ )
A_ : int = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ ,UpperCAmelCase__ ,atol=1e-5 )
def _a ( self : Any ):
'''simple docstring'''
A_ : str = ModelForTest()
A_ : int = torch.randn(2 ,3 )
A_ : Union[str, Any] = test_model(UpperCAmelCase__ )
A_ : Optional[Any] = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ ,UpperCAmelCase__ )
A_ : Any = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ ,output + 1 ,atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
A_ : Optional[int] = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ ,UpperCAmelCase__ )
A_ : List[Any] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ ,output + 1 ,atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A_ : Dict = SequentialHook(PostForwardHook() ,PostForwardHook() )
add_hook_to_module(UpperCAmelCase__ ,UpperCAmelCase__ )
A_ : List[Any] = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ ,output + 2 ,atol=1e-5 )
def _a ( self : int ):
'''simple docstring'''
A_ : Dict = ModelForTest()
A_ : int = torch.randn(2 ,3 )
A_ : List[str] = test_model(UpperCAmelCase__ )
A_ : str = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ ,UpperCAmelCase__ )
A_ : Optional[Any] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ ,output + 1 ) )
self.assertTrue(outputa.requires_grad )
A_ : Union[str, Any] = True
A_ : Tuple = test_model(UpperCAmelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
        # This will move each submodule onto a different device
add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device ,torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device(0 ) )
self.assertEqual(model.lineara.weight.device ,torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
A_ : Any = torch.randn(2 ,3 )
A_ : Optional[int] = model(UpperCAmelCase__ )
self.assertEqual(output.device ,torch.device(1 ) )
        # We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(UpperCAmelCase__ ,AlignDevicesHook(io_same_device=UpperCAmelCase__ ) )
A_ : List[str] = torch.randn(2 ,3 ).to(0 )
A_ : str = model(UpperCAmelCase__ )
self.assertEqual(output.device ,torch.device(0 ) )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
        # This offloads each submodule while keeping a single execution device
A_ : Any = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara ,AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A_ : Tuple = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device ,UpperCAmelCase__ )
A_ : Optional[int] = torch.randn(2 ,3 )
A_ : Tuple = model(UpperCAmelCase__ )
self.assertEqual(output.device ,UpperCAmelCase__ )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
# Now test with buffers included in the offload
A_ : Any = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara ,AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(**UpperCAmelCase__ ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(**UpperCAmelCase__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device("""meta""" ) )
A_ : int = torch.randn(2 ,3 )
A_ : Any = model(UpperCAmelCase__ )
self.assertEqual(output.device ,UpperCAmelCase__ )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
def _a ( self : Any ):
'''simple docstring'''
A_ : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
        # This offloads each submodule while keeping a single execution device
A_ : Dict = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(UpperCAmelCase__ ,execution_device=UpperCAmelCase__ ,offload=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A_ : Union[str, Any] = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device ,UpperCAmelCase__ )
A_ : int = torch.randn(2 ,3 )
A_ : Optional[Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device ,UpperCAmelCase__ )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(UpperCAmelCase__ ,execution_device=UpperCAmelCase__ ,offload=UpperCAmelCase__ ,offload_buffers=UpperCAmelCase__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device("""meta""" ) )
A_ : Optional[int] = torch.randn(2 ,3 )
A_ : Dict = model(UpperCAmelCase__ )
self.assertEqual(output.device ,UpperCAmelCase__ )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Dict = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
        # This offloads each submodule while keeping a single execution device
A_ : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
UpperCAmelCase__ ,execution_device=UpperCAmelCase__ ,offload=UpperCAmelCase__ ,weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A_ : Dict = torch.device(UpperCAmelCase__ )
self.assertEqual(model.batchnorm.running_mean.device ,UpperCAmelCase__ )
A_ : Any = torch.randn(2 ,3 )
A_ : Optional[Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device ,UpperCAmelCase__ )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
UpperCAmelCase__ ,execution_device=UpperCAmelCase__ ,offload=UpperCAmelCase__ ,weights_map=model.state_dict() ,offload_buffers=UpperCAmelCase__ ,)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device("""meta""" ) )
A_ : int = torch.randn(2 ,3 )
A_ : Tuple = model(UpperCAmelCase__ )
self.assertEqual(output.device ,UpperCAmelCase__ )
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(UpperCAmelCase__ )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device ,torch.device("""cpu""" ) )
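# Hedged sketch of the hook semantics the tests above rely on: attaching a
# second hook to a module replaces the first, so chaining behaviour requires
# SequentialHook. The hook below mirrors the pre-forward hook defined earlier.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, SequentialHook, add_hook_to_module


class AddOnePreHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # Shift the first positional input by one before the real forward runs.
        return (args[0] + 1,) + args[1:], kwargs


linear = nn.Linear(3, 3)
x = torch.randn(2, 3)
expected = linear(x + 2)  # two chained pre-hooks should be equivalent to this
add_hook_to_module(linear, SequentialHook(AddOnePreHook(), AddOnePreHook()))
assert torch.allclose(linear(x), expected, atol=1e-5)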
| 665 |
'''simple docstring'''
import math
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( _UpperCAmelCase = 1_0_0_0_1 ) -> int:
"""simple docstring"""
try:
a_ = int(_UpperCAmelCase )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
a_ = []
a_ = 2
while len(_UpperCAmelCase ) < nth:
if is_prime(_UpperCAmelCase ):
primes.append(_UpperCAmelCase )
num += 1
else:
num += 1
return primes[len(_UpperCAmelCase ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
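# Why the 6k +/- 1 stride above is safe: writing any integer as 6k + r with
# r in 0..5, the residues 0, 2, 3 and 4 are divisible by 2 or 3, so every
# prime p > 3 satisfies p % 6 in {1, 5}. A standalone sanity check:
def naive_is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, n))


assert all(p % 6 in (1, 5) for p in range(5, 200) if naive_is_prime(p))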
| 697 | 0 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =BarthezTokenizer
__a =BarthezTokenizerFast
__a =True
__a =True
def UpperCamelCase__ ( self : Union[str, Any] ):
super().setUp()
_a = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=UpperCAmelCase__ )
_a = tokenizer
def UpperCamelCase__ ( self : Optional[int] ):
_a = "<pad>"
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def UpperCamelCase__ ( self : List[Any] ):
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(UpperCAmelCase__ ) , 10_11_22 )
def UpperCamelCase__ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def UpperCamelCase__ ( self : Optional[int] ):
_a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_a = [0, 57, 30_18, 7_03_07, 91, 2]
_a = self.tokenizer(
UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_a = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCamelCase__ ( self : Optional[int] ):
if not self.test_rust_tokenizer:
return
_a = self.get_tokenizer()
_a = self.get_rust_tokenizer()
_a = "I was born in 92000, and this is falsé."
_a = tokenizer.tokenize(UpperCAmelCase__ )
_a = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
_a = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a = self.get_rust_tokenizer()
_a = tokenizer.encode(UpperCAmelCase__ )
_a = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def UpperCamelCase__ ( self : int ):
# fmt: off
_a = {"input_ids": [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
_a = [
"Le transformeur est un modèle d\'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=UpperCAmelCase__ , )
| 692 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = 10
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = [1, 2, 3, 4]
a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = ''
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
a_ , a_ = process_story(UpperCAmelCase__ )
a_ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = ['It was the best of times.']
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = torch.tensor([1, 2, 3, 4] )
a_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = 101
a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
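# The tests above pin down truncate_or_pad's contract: truncate sequences
# longer than block_size, right-pad shorter ones with the pad token. A minimal
# reference implementation consistent with those expected values (assumed; the
# real utils_summarization helper may differ in details):
def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad_sketch(list(range(1, 14)), 10, 0) == list(range(1, 11))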
| 697 | 0 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
snake_case__ : Any = yaml.safe_load(
'\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
snake_case__ : Union[str, Any] = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
snake_case__ : Dict = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
snake_case__ : Optional[int] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
snake_case__ : Dict = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
snake_case__ : Dict = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
snake_case__ : Any = (
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
snake_case__ : Any = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
snake_case__ : Optional[int] = (
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
snake_case__ : Any = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
snake_case__ : List[str] = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
snake_case__ : int = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
snake_case__ : str = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
snake_case__ : Optional[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
snake_case__ : Optional[int] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
snake_case__ : Tuple = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
snake_case__ : List[Any] = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
snake_case__ : int = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
snake_case__ : List[str] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
snake_case__ : Optional[int] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
snake_case__ : List[str] = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
snake_case__ : Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
snake_case__ : Dict = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
snake_case__ : Union[str, Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
snake_case__ : Tuple = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
snake_case__ : str = ''
snake_case__ : int = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
snake_case__ : Dict = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
snake_case__ : Tuple = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->Optional[int]:
assert ReadMe.from_string(_UpperCAmelCase , _UpperCAmelCase ).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->int:
with pytest.raises(_UpperCAmelCase , match=re.escape(expected_error.format(path="root" ) ) ):
_UpperCAmelCase =ReadMe.from_string(_UpperCAmelCase , _UpperCAmelCase )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->str:
with pytest.raises(_UpperCAmelCase , match=re.escape(expected_error.format(path="root" ) ) ):
ReadMe.from_string(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase__ ( _lowerCamelCase ) ->Tuple:
ReadMe.from_string(_UpperCAmelCase , _UpperCAmelCase , suppress_parsing_errors=_UpperCAmelCase )
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase =Path(_UpperCAmelCase ) / "README.md"
with open(_UpperCAmelCase , "w+" ) as readme_file:
readme_file.write(_UpperCAmelCase )
_UpperCAmelCase =ReadMe.from_readme(_UpperCAmelCase , _UpperCAmelCase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->int:
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase =Path(_UpperCAmelCase ) / "README.md"
with open(_UpperCAmelCase , "w+" ) as readme_file:
readme_file.write(_UpperCAmelCase )
_UpperCAmelCase =expected_error.format(path=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase , match=re.escape(_UpperCAmelCase ) ):
_UpperCAmelCase =ReadMe.from_readme(_UpperCAmelCase , _UpperCAmelCase )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 408 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
| 697 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between the question-answering/sequence-classification
        # heads and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # A dummy image is used, so OCR should not be applied
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
return inputs
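

# Minimal usage sketch (illustrative addition, not part of the original module):
# instantiate the config with one non-default field and inspect it.
if __name__ == "__main__":
    demo_config = LayoutLMv3Config(input_size=112)
    print(demo_config.model_type, demo_config.input_size)  # layoutlmv3 112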
| 502 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 0 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve a rat-in-a-maze problem: print the path grid and return whether it is solvable."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursively explore the maze, marking visited cells in `solutions`."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
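
# Illustrative run (not part of the original file): an arbitrary 4x4 maze in
# which 0 marks an open cell and 1 marks a wall.
_EXAMPLE_MAZE = [
    [0, 1, 0, 0],
    [0, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 0, 0],
]
if __name__ == "__main__":
    solve_maze(_EXAMPLE_MAZE)  # prints the visited-cell grid and returns True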
| 22 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
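

# Minimal usage sketch (illustrative addition, not part of the original
# module): override the input resolution while keeping the other defaults.
if __name__ == "__main__":
    demo_config = ViTConfig(image_size=384, patch_size=32)
    print(demo_config.num_hidden_layers)  # 12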
| 697 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON configuration
    config = LxmertConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
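
# Illustrative invocation (not part of the original script; the script name and
# paths below are placeholders):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_tf/model.ckpt \
#       --config_file ./lxmert_tf/config.json \
#       --pytorch_dump_path ./lxmert_pytorch_model.bin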
| 382 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Count the fillings of a row of `length` units with blocks of length >= 3,
    where blocks must be separated by at least one empty cell."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
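
if __name__ == "__main__":
    # Spot check (illustrative addition): the Project Euler 114 statement gives
    # exactly seventeen arrangements for a row of seven units.
    assert solution(7) == 17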
| 697 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> int:
'''simple docstring'''
snake_case : Any = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case : int = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
snake_case : Dict = Image.open(ds[0]["file"] ).convert("RGB" )
snake_case : Optional[int] = image_processing(UpperCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case : Dict = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
snake_case : Union[str, Any] = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 
6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
snake_case : List[Any] = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
snake_case : Any = image_processing(UpperCAmelCase__ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 204 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print('Truth Table of NOR Gate:')
    print('| Input 1 | Input 2 | Output |')
    print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''')
    print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''')
    print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''')
    print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 697 | 0 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = '''I was born in 92000, and this is falsé.'''

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 171 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models')
        download_parser.add_argument(
            '--force', action='store_true', help='Force the model to be download even if already in cache-dir')
        download_parser.add_argument(
            '--trust-remote-code', action='store_true', help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine')
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
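
# Illustrative CLI usage (not part of the original module; the model name is a
# placeholder):
#
#   transformers-cli download bert-base-uncased --cache-dir ./models --force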
| 697 | 0 |
"""simple docstring"""
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct so the minimum spanning tree is unique."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ''''''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f'{head} -> {tail} == {weight}\n'
        return string.rstrip('''\n''')

    def get_edges(self):
        """Return all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute the minimum spanning tree with Borůvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
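

if __name__ == "__main__":
    # Illustrative run (not part of the original file): Borůvka's algorithm on a
    # small weighted graph; edge tuples are (head, tail, weight).
    example_graph = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (2, 3, 2), (1, 3, 4)])
    example_graph.distinct_weight()
    print(Graph.boruvka_mst(example_graph))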
| 506 |
'''simple docstring'''
def excel_column_to_number(column_title: str) -> int:
    """Convert an Excel-style uppercase column title (A, B, ..., Z, AA, ...) to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
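
if __name__ == "__main__":
    # Spot checks (illustrative addition): "A" is column 1, "Z" is 26,
    # "AA" is 27 and "AZ" is 52.
    assert excel_column_to_number("AZ") == 52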
| 697 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode characters used by the BPE."""
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
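

# Quick illustration (not part of the original file): the symbol pairs that
# drive the BPE merge loop in `bpe` below.
# get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}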
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
| 84 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, 'hann'),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='dB',
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 697 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behave like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 351 |
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """simple docstring"""
    # Jaccard index: |A intersection B| / |A union B|; the "alternative union"
    # variant divides by |A| + |B| instead.
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
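# The demo below prints 0.375: the intersection is {'c', 'd', 'e'} (3 elements)
# and the union has 8 elements, so 3 / 8 = 0.375.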
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 697 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
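# Note: with this lazy-module pattern, `import transformers.models.xlm_roberta`
# stays cheap -- the heavy torch/TF/Flax backends are only imported the first
# time an attribute such as `XLMRobertaModel` is resolved via the `_LazyModule` proxy.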
| 665 |
'''simple docstring'''
def fizz_buzz(number, iterations):
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            'starting number must be\n and integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
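# Example: fizz_buzz(1, 7) returns "1 2 Fizz 4 Buzz Fizz 7 " (trailing space included).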
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct the config, then the model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
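# Typical invocation of this conversion script (the file name and paths below
# are placeholders; see the argparse setup that follows):
#   python convert_openai_checkpoint.py \
#       --openai_checkpoint_folder_path ./openai-gpt-ckpt \
#       --pytorch_dump_folder_path ./openai-gpt-pt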
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 692 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    """simple docstring"""

    def __init__(self, generator, features=None, cache_dir=None, keep_in_memory=False, streaming=False, gen_kwargs=None, num_proc=None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
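# Minimal usage sketch (hypothetical generator; requires the surrounding
# `datasets` package at runtime):
#   def gen():
#       yield {"text": "hello"}
#   ds = GeneratorDatasetInputStream(gen).read()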
| 697 | 0 |
from ..utils import DummyObject, requires_backends
# Class name restored per diffusers' dummy note_seq objects (assumption); the
# metaclass must be the imported `DummyObject`.
class MidiProcessor(metaclass=DummyObject):
    """simple docstring"""
    _backends = ["transformers", "torch", "note_seq"]
def __init__( self , *_snake_case , **_snake_case ):
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_snake_case , **_snake_case ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , *_snake_case , **_snake_case ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 408 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors, steps):
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors):
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 6_0))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector, angle_in_degrees):
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors):
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('equal')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 697 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 502 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """simple docstring"""
    # CJK Unicode block ranges
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word):
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens, chinese_word_set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            window = min(end - start, max_word_len)
            for i in range(window, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """simple docstring"""
    ltp_res = []
    for i in range(0, len(lines), 1_0_0):
        res = ltp_tokenizer.pipeline(lines[i : i + 1_0_0], tasks=['cws']).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 1_0_0):
        res = bert_tokenizer(lines[i : i + 1_0_0], add_special_tokens=True, truncation=True, max_length=5_1_2)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    """simple docstring"""
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
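# End-to-end: LTP supplies Chinese word segmentation, BERT supplies subword
# tokenization; `ref_ids` records, per input line, the positions of "##" subwords
# that fall inside an LTP word -- exactly what whole-word masking needs at train time.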
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
| 697 | 0 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors, steps):
    '''simple docstring'''
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors):
    '''simple docstring'''
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector, angle_in_degrees):
    '''simple docstring'''
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors):
    '''simple docstring'''
    axes = plt.gca()
    axes.set_aspect('''equal''')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 22 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
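# Token type ids mark segment membership for sentence-pair inputs:
#   [CLS] A ... [SEP]        -> all 0s
#   [CLS] A [SEP] B [SEP]    -> 0s for the first segment, 1s for the second.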
| 697 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output of `step_pred`: the previous-timestep sample and its mean."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler with a predictor-corrector sampler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )

    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator=None, return_dict: bool = True) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator=None, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
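# The scheduler implements a predictor-corrector sampler: `step_pred` is the
# Euler-Maruyama predictor for the reverse-time SDE, and `step_correct` is a
# Langevin-dynamics corrector whose step size is scaled by the configured SNR.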
| 382 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "lilt"

    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.0_2, layer_norm_eps=1e-12,
                 pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None,
                 channel_shrink_ratio=4, max_2d_position_embeddings=1024, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 697 | 0 |
import math
def is_prime(number):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth=10001):
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
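# Project Euler problem 7: solution() evaluates to 104743, the 10001st prime.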
if __name__ == "__main__":
print(F'{solution() = }')
| 204 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze):
    """simple docstring"""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved


def run_maze(maze, i, j, solutions):
    """simple docstring"""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
        return False
    return False
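# Example: solve_maze([[0, 1], [0, 0]]) prints [1, 0] / [1, 1] and returns True;
# a maze whose start or goal cell is blocked (1) returns False.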
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    model_type = '''camembert'''
    def __init__(self, vocab_size=3_0_5_2_2, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2,
                 intermediate_size=3_0_7_2, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=5_1_2, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
                 use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
| 171 |
'''simple docstring'''
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value, from_type, to_type):
    """simple docstring"""
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'from_type\' value: {from_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F'''Invalid \'to_type\' value: {to_type!r}.\n'''
            F'''Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}'''
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(1_0, exponent)
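# Example: length_conversion(4, "kilometer", "megametre") == 0.004
# (km has exponent 3, Mm has exponent 6, so 4 * 10 ** -3).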
if __name__ == "__main__":
from doctest import testmod
testmod()
| 697 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    '''simple docstring'''

    def setUp(self):
        self.tool = load_tool('''text-question-answering''')
        self.tool.setup()
        self.remote_tool = load_tool('''text-question-answering''', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, '''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, '''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')
| 506 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"
class DonutProcessorTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
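# `token2json` inverts Donut's serialization: <s_key>...</s_key> tags become dict
# keys, and <sep/> splits repeated groups (the nicknames) into list items.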
| 697 | 0 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    '''simple docstring'''

    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 192, 384, 768],
'num_groups': 2,
}
        # `is_decoder=False` is assumed here; the obfuscated source did not preserve the value.
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """depth-estimation""": DPTForDepthEstimation,
            """feature-extraction""": DPTModel,
            """image-segmentation""": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='DPT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError):
            model = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas')
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas').to(torch_device)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1E-4))
| 84 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """simple docstring"""

    def __init__(self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    # Method names below are restored to match the upstream transformers test file
    # (an assumption; the skip reasons make the intended names unambiguous in most cases).
    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
    def test_hidden_states_output(self):
        pass

    @unittest.skip('TimmBackbone initialization is managed on the timm side')
    def test_initialization(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def test_model_common_attributes(self):
        pass

    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_save_load(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def test_tie_model_weights(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_model_weights_reload_when_same_dtype(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
    def test_channels(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t support output_attentions.')
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip('Safetensors is not supported by timm.')
    def test_can_use_safetensors(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
| 697 | 0 |
from timeit import timeit
def a_ (__A ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError("the value of input must not be negative" )
__a : Optional[Any] = 0
while number:
number &= number - 1
result += 1
return result
def a_ (__A ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError("the value of input must not be negative" )
__a : List[str] = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
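# Illustrative example (not in the original file): both routines above compute
# the population count; 25 is 0b11001, so each returns 3, and each returns 0
# for an input of 0.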
def a_ () -> None:
"""simple docstring"""
def do_benchmark(__A ) -> None:
__a : Any = "import __main__ as z"
print(f'Benchmark when {number = }:' )
print(f'{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }' )
        __a : Optional[int] = timeit(f"z.get_set_bits_count_using_modulo_operator({number})" , setup=_UpperCAmelCase )
print(f'timeit() runs in {timing} seconds' )
print(f'{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }' )
        __a : Dict = timeit(
        f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})" , setup=_UpperCAmelCase , )
print(f'timeit() runs in {timing} seconds' )
for number in (25, 37, 58, 0):
do_benchmark(_UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 351 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# with apply_OCR = True
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
a_ = Image.open(ds[0]['file'] ).convert('RGB' )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 697 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__magic_name__ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Dict):
for attribute in key.split("""."""):
A_ : List[Any] = getattr(_UpperCAmelCase , _UpperCAmelCase)
if weight_type is not None:
A_ : str = getattr(_UpperCAmelCase , _UpperCAmelCase).shape
else:
A_ : str = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
A_ : int = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Union[str, Any] = value
elif weight_type == "bias":
A_ : Tuple = value
else:
A_ : str = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict):
A_ : int = []
A_ : Any = fairseq_model.state_dict()
A_ : Any = hf_model.feature_extractor
A_ : Optional[Any] = hf_model.adapter
for name, value in fairseq_dict.items():
A_ : str = False
if "conv_layers" in name:
load_conv_layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
A_ : Optional[int] = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""]):
load_adapter(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
A_ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
A_ : Any = True
if "*" in mapped_key:
A_ : Optional[Any] = name.split(_UpperCAmelCase)[0].split(""".""")[-2]
A_ : Union[str, Any] = mapped_key.replace("""*""" , _UpperCAmelCase)
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : Dict = """weight_v"""
elif "bias" in name:
A_ : Any = """bias"""
elif "weight" in name:
A_ : Dict = """weight"""
else:
A_ : Dict = None
set_recursively(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
continue
if not is_used:
unused_weights.append(_UpperCAmelCase)
logger.warning(F'Unused weights: {unused_weights}')
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Dict):
A_ : Tuple = full_name.split("""conv_layers.""")[-1]
A_ : Dict = name.split(""".""")
A_ : Union[str, Any] = int(items[0])
A_ : Any = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
A_ : Union[str, Any] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
A_ : Tuple = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
A_ : int = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
A_ : List[Any] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(_UpperCAmelCase)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Optional[Any]):
A_ : List[str] = full_name.split("""adaptor.""")[-1]
A_ : Tuple = name.split(""".""")
if items[1].isdigit():
A_ : List[str] = int(items[1])
else:
A_ : Optional[int] = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
A_ : Union[str, Any] = value
logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.')
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
A_ : List[str] = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
A_ : Tuple = value
logger.info(F'Adapter proj layer bias was initialized from {full_name}.')
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
A_ : Optional[Any] = value
logger.info(F'Adapter proj layer weight was initialized from {full_name}.')
elif isinstance(_UpperCAmelCase , _UpperCAmelCase):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
A_ : Any = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.')
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
A_ : int = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.')
else:
unused_weights.append(_UpperCAmelCase)
def lowerCamelCase ( lowerCamelCase : str):
A_ , A_ : List[Any] = emb.weight.shape
A_ : Union[str, Any] = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase)
A_ : Dict = emb.weight.data
return lin_layer
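# Note (descriptive): the helper above converts a decoder embedding matrix into
# an lm_head-style output projection, re-using the embedding weights as the
# projection weights (weight tying).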
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : List[str] , ):
A_ : List[str] = WavaVecaConfig.from_pretrained(
_UpperCAmelCase , add_adapter=_UpperCAmelCase , adapter_stride=_UpperCAmelCase , adapter_kernel_size=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , output_hidden_size=_UpperCAmelCase , )
A_ : List[str] = MBartConfig.from_pretrained(_UpperCAmelCase)
# load model
A_ , A_ , A_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""")[:-1]),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
A_ : Union[str, Any] = model[0].eval()
# load feature extractor
A_ : Any = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase , use_auth_token=_UpperCAmelCase)
# set weights for wav2vec2 encoder
A_ : Any = WavaVecaModel(_UpperCAmelCase)
recursively_load_weights_wavaveca(model.encoder , _UpperCAmelCase)
# load decoder weights
A_ : Union[str, Any] = MBartForCausalLM(_UpperCAmelCase)
A_ , A_ : str = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_UpperCAmelCase)
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}')
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}')
A_ : Optional[Any] = SpeechEncoderDecoderModel(encoder=_UpperCAmelCase , decoder=_UpperCAmelCase)
A_ : str = False
A_ : str = MBartaaTokenizer(_UpperCAmelCase)
tokenizer.save_pretrained(_UpperCAmelCase)
A_ : Tuple = hf_wavavec.config.to_dict()
A_ : Any = tokenizer.pad_token_id
A_ : List[Any] = tokenizer.bos_token_id
A_ : List[Any] = tokenizer.eos_token_id
A_ : Any = """mbart50"""
A_ : Optional[int] = """wav2vec2"""
A_ : Optional[Any] = tokenizer.eos_token_id
A_ : Union[str, Any] = 25_0004
A_ : Dict = tokenizer.eos_token_id
A_ : List[Any] = SpeechEncoderDecoderConfig.from_dict(_UpperCAmelCase)
hf_wavavec.save_pretrained(_UpperCAmelCase)
feature_extractor.save_pretrained(_UpperCAmelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
    parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1_024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=250_004, type=int, help='`decoder_start_token_id` of model config')
__magic_name__ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 665 |
'''simple docstring'''
import math
def a ( _UpperCAmelCase ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
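# Illustrative example: 29 passes the checks above (its only trial divisors of
# the form 6k +/- 1 up to sqrt(29) are 5 and 7, neither of which divides it),
# while 25 fails immediately at i = 5.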
def a ( _UpperCAmelCase = 1_0_0_0_1 ) -> int:
"""simple docstring"""
try:
a_ = int(_UpperCAmelCase )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
a_ = []
a_ = 2
while len(_UpperCAmelCase ) < nth:
if is_prime(_UpperCAmelCase ):
primes.append(_UpperCAmelCase )
num += 1
else:
num += 1
return primes[len(_UpperCAmelCase ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 0 |
'''simple docstring'''
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Union[str, Any] ) -> str:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
_a = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
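# Illustrative example: starting at number 1 and running for 15 iterations,
# the routine above returns
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz ".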
if __name__ == "__main__":
import doctest
doctest.testmod()
| 692 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = 10
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = [1, 2, 3, 4]
a_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
a_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCAmelCase__ , self.block_size , 0 ) , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = ''
a_ , a_ = process_story(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , [] )
self.assertEqual(UpperCAmelCase__ , [] )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
a_ , a_ = process_story(UpperCAmelCase__ )
a_ = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = ['It was the best of times.']
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = torch.tensor([1, 2, 3, 4] )
a_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 0 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 23 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
a_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCAmelCase__ , 1 ).numpy() , expected.numpy() )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = 101
a_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
a_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
a_ = compute_token_type_ids(UpperCAmelCase__ , UpperCAmelCase__ )
np.testing.assert_array_equal(UpperCAmelCase__ , UpperCAmelCase__ )
| 697 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _a ( A__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =tempfile.mkdtemp()
_UpperCAmelCase =5
# Realm tok
_UpperCAmelCase =[
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCAmelCase =os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
_UpperCAmelCase =os.path.join(UpperCAmelCase__ , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCAmelCase =os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( self ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def SCREAMING_SNAKE_CASE ( self ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =RealmConfig(num_block_records=self.num_block_records )
return config
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
] , dtype=UpperCAmelCase__ , )
return block_records
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.get_config()
_UpperCAmelCase =self.get_dummy_retriever()
_UpperCAmelCase =retriever.tokenizer
_UpperCAmelCase =np.array([0, 3] , dtype="long" )
_UpperCAmelCase =tokenizer(["Test question"] ).input_ids
_UpperCAmelCase =tokenizer(
["the fourth"] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
_UpperCAmelCase =config.reader_seq_len
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase =retriever(
UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors="np" )
self.assertEqual(len(UpperCAmelCase__ ) , 2 )
self.assertEqual(len(UpperCAmelCase__ ) , 2 )
self.assertEqual(len(UpperCAmelCase__ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.get_config()
_UpperCAmelCase =self.get_dummy_retriever()
_UpperCAmelCase =retriever.tokenizer
_UpperCAmelCase =np.array([0, 3, 5] , dtype="long" )
_UpperCAmelCase =tokenizer(["Test question"] ).input_ids
_UpperCAmelCase =tokenizer(
["the fourth", "longer longer"] , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ).input_ids
_UpperCAmelCase =config.reader_seq_len
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase =retriever(
UpperCAmelCase__ , UpperCAmelCase__ , answer_ids=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors="np" )
self.assertEqual([False, True, True] , UpperCAmelCase__ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCAmelCase__ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCAmelCase =retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCAmelCase =os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCAmelCase =RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
| 408 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class _snake_case ( unittest.TestCase , snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = load_tool('text-question-answering' )
self.tool.setup()
a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
| 697 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] ):
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
__lowercase = mf_knapsack(i - 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__lowercase = max(
mf_knapsack(i - 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , mf_knapsack(i - 1 , _UpperCAmelCase , _UpperCAmelCase , j - wt[i - 1] ) + val[i - 1] , )
__lowercase = val
return f[i][j]
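# The bottom-up variant below fills the same 0/1 knapsack table iteratively:
# dp[i][w] = max(dp[i-1][w], val[i-1] + dp[i-1][w - wt[i-1]]) whenever item i
# fits in the remaining capacity, and dp[i-1][w] otherwise.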
def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ):
__lowercase = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
__lowercase = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
__lowercase = dp[i - 1][w_]
return dp[n][w_], dp
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] ):
if not (isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(_UpperCAmelCase , (list, tuple) )):
raise ValueError(
'''Both the weights and values vectors must be either lists or tuples''' )
__lowercase = len(_UpperCAmelCase )
if num_items != len(_UpperCAmelCase ):
__lowercase = (
'''The number of weights must be the same as the number of values.\n'''
f"But got {num_items} weights and {len(_UpperCAmelCase )} values"
)
raise ValueError(_UpperCAmelCase )
for i in range(_UpperCAmelCase ):
if not isinstance(wt[i] , _UpperCAmelCase ):
__lowercase = (
'''All weights must be integers but got weight of '''
f"type {type(wt[i] )} at index {i}"
)
raise TypeError(_UpperCAmelCase )
__lowercase , __lowercase = knapsack(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = set()
_construct_solution(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return optimal_val, example_optional_set
def _lowerCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ):
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(_UpperCAmelCase , _UpperCAmelCase , i - 1 , _UpperCAmelCase , _UpperCAmelCase )
else:
optimal_set.add(_UpperCAmelCase )
_construct_solution(_UpperCAmelCase , _UpperCAmelCase , i - 1 , j - wt[i - 1] , _UpperCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = [3, 2, 4, 4]
_SCREAMING_SNAKE_CASE = [4, 3, 2, 3]
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 6
_SCREAMING_SNAKE_CASE = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
    # the optimal subset for the above example consists of items 3 and 4
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('''optimal_value = ''', optimal_solution)
print('''An optimal subset corresponding to the optimal value''', optimal_subset)
| 502 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =[
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 0 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class A ( _a ):
lowercase_ = ['vqvae']
def __init__( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , ) -> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , mel=UpperCAmelCase__ , vqvae=UpperCAmelCase__ )
def __lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , UpperCAmelCase__ ) else 10_00
@torch.no_grad()
def __call__( self : Dict , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : Dict = None , lowerCAmelCase_ : Any = 0 , lowerCAmelCase_ : Dict = 0 , lowerCAmelCase_ : Tuple = None , lowerCAmelCase_ : Optional[Any] = None , lowerCAmelCase_ : str = 0 , lowerCAmelCase_ : Tuple = 0 , lowerCAmelCase_ : List[Any] = None , lowerCAmelCase_ : Optional[Any] = 0 , lowerCAmelCase_ : Dict = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : List[str]=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
_a = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase__ )
_a = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_a = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_a = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase__ , device=self.device , )
_a = noise
_a = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase__ , UpperCAmelCase__ )
_a = self.mel.audio_slice_to_image(UpperCAmelCase__ )
_a = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_a = (input_image / 2_55) * 2 - 1
_a = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_a = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase__ , 0 ) ).latent_dist.sample(
generator=UpperCAmelCase__ )[0]
_a = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_a = self.scheduler.add_noise(UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler.timesteps[start_step - 1] )
_a = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_a = int(mask_start_secs * pixels_per_second )
_a = int(mask_end_secs * pixels_per_second )
_a = self.scheduler.add_noise(UpperCAmelCase__ , UpperCAmelCase__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , UpperCAmelCase__ ):
_a = self.unet(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )['''sample''']
else:
_a = self.unet(UpperCAmelCase__ , UpperCAmelCase__ )['''sample''']
if isinstance(self.scheduler , UpperCAmelCase__ ):
_a = self.scheduler.step(
model_output=UpperCAmelCase__ , timestep=UpperCAmelCase__ , sample=UpperCAmelCase__ , eta=UpperCAmelCase__ , generator=UpperCAmelCase__ , )['''prev_sample''']
else:
_a = self.scheduler.step(
model_output=UpperCAmelCase__ , timestep=UpperCAmelCase__ , sample=UpperCAmelCase__ , generator=UpperCAmelCase__ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_a = mask[:, step, :, :mask_start]
if mask_end > 0:
_a = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_a = 1 / self.vqvae.config.scaling_factor * images
_a = self.vqvae.decode(UpperCAmelCase__ )['''sample''']
_a = (images / 2 + 0.5).clamp(0 , 1 )
_a = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_a = (images * 2_55).round().astype('''uint8''' )
_a = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCAmelCase__ , mode='''RGB''' ).convert('''L''' ) for _ in images) )
_a = [self.mel.image_to_audio(UpperCAmelCase__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCAmelCase__ ) )
@torch.no_grad()
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , UpperCAmelCase__ )
self.scheduler.set_timesteps(UpperCAmelCase__ )
_a = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_a = (sample / 2_55) * 2 - 1
_a = torch.Tensor(UpperCAmelCase__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_a = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_a = self.scheduler.alphas_cumprod[t]
_a = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_a = 1 - alpha_prod_t
_a = self.unet(UpperCAmelCase__ , UpperCAmelCase__ )['''sample''']
_a = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_a = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_a = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] ) -> torch.Tensor:
"""simple docstring"""
_a = acos(torch.dot(torch.flatten(UpperCAmelCase__ ) , torch.flatten(UpperCAmelCase__ ) ) / torch.norm(UpperCAmelCase__ ) / torch.norm(UpperCAmelCase__ ) )
return sin((1 - alpha) * theta ) * xa / sin(UpperCAmelCase__ ) + sin(alpha * theta ) * xa / sin(UpperCAmelCase__ )
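# Note (descriptive): the static helper above is spherical linear interpolation
# (slerp) between two noise tensors: theta is the angle between the flattened
# tensors, and alpha in [0, 1] moves along the connecting arc.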
| 22 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase =logging.get_logger(__name__)
__lowerCAmelCase ={
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = "vit"
def __init__( self , UpperCAmelCase__=768 , UpperCAmelCase__=12 , UpperCAmelCase__=12 , UpperCAmelCase__=3072 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=224 , UpperCAmelCase__=16 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=16 , **UpperCAmelCase__ , ) -> Dict:
super().__init__(**UpperCAmelCase__ )
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = layer_norm_eps
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = qkv_bias
a_ = encoder_stride
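# Worked example (illustrative): with the defaults above, image_size=224 and
# patch_size=16 give (224 // 16) ** 2 = 196 patch embeddings per image, plus
# one [CLS] token, for a transformer sequence length of 197.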
class _snake_case ( snake_case ):
"""simple docstring"""
_UpperCamelCase = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-4
| 697 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def _UpperCamelCase ( snake_case__ ) -> Optional[MinHash]:
if len(_UpperCAmelCase ) < MIN_NUM_TOKENS:
return None
__UpperCAmelCase : int = MinHash(num_perm=_UpperCAmelCase )
for token in set(_UpperCAmelCase ):
min_hash.update(token.encode() )
return min_hash
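# Note (descriptive): a signature built from NUM_PERM = 256 permutations lets
# the LSH index below surface candidate near-duplicates whose estimated
# Jaccard similarity exceeds the configured threshold (0.85 by default).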
def _UpperCamelCase ( snake_case__ ) -> Set[str]:
return {t for t in NON_ALPHA.split(_UpperCAmelCase ) if len(t.strip() ) > 0}
class _snake_case :
def __init__( self: Tuple , *,
__lowerCamelCase: Dict = 0.85 , ) -> Dict:
__UpperCAmelCase : List[str] = duplication_jaccard_threshold
__UpperCAmelCase : Optional[Any] = NUM_PERM
__UpperCAmelCase : Optional[int] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
__UpperCAmelCase : int = defaultdict(UpperCAmelCase__ )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: List[Any] , __lowerCamelCase: List[str] ) -> None:
__UpperCAmelCase : Optional[int] = self._index.query(UpperCAmelCase__ )
if code_key in self._index.keys:
print(f'''Duplicate key {code_key}''' )
return
self._index.insert(UpperCAmelCase__ , UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(UpperCAmelCase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(UpperCAmelCase__ )
def _lowerCamelCase ( self: List[str] ) -> List[List[Dict]]:
__UpperCAmelCase : int = []
for base, duplicates in self._duplicate_clusters.items():
__UpperCAmelCase : Optional[Any] = [base] + list(UpperCAmelCase__ )
# reformat the cluster to be a list of dict
__UpperCAmelCase : str = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
duplicate_clusters.append(UpperCAmelCase__ )
return duplicate_clusters
def _lowerCamelCase ( self: Any , __lowerCamelCase: Tuple ) -> None:
__UpperCAmelCase : List[Any] = self.get_duplicate_clusters()
with open(UpperCAmelCase__ , "w" ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def _UpperCamelCase ( snake_case__ ) -> str:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = element
__UpperCAmelCase : int = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _UpperCamelCase ( snake_case__ ) -> Optional[int]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash, ThreadedIterator(_UpperCAmelCase, max_queue_size=1_0000 ), chunksize=100, ):
if data is not None:
yield data
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : str = DuplicationIndex(duplication_jaccard_threshold=_UpperCAmelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_UpperCAmelCase ) ), max_queue_size=100 ) ):
di.add(_UpperCAmelCase, _UpperCAmelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _UpperCamelCase ( snake_case__, snake_case__ ) -> float:
__UpperCAmelCase : List[Any] = get_tokens(_UpperCAmelCase )
__UpperCAmelCase : List[str] = get_tokens(_UpperCAmelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
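# Illustrative example: the token sets of "def add(a, b)" and "def add(a, c)"
# are {def, add, a, b} and {def, add, a, c}, so the similarity above is
# 3 / 5 = 0.6.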
_snake_case = None
def _UpperCamelCase ( snake_case__, snake_case__ ) -> int:
__UpperCAmelCase : str = []
for elementa in cluster:
__UpperCAmelCase : Optional[int] = _shared_dataset[elementa["base_index"]]["content"]
for elementa in extremes:
__UpperCAmelCase : Dict = _shared_dataset[elementa["base_index"]]["content"]
if jaccard_similarity(_UpperCAmelCase, _UpperCAmelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
__UpperCAmelCase : List[str] = 1
extremes.append(_UpperCAmelCase )
return extremes
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> str:
global _shared_dataset
__UpperCAmelCase : int = dataset
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Optional[int] = partial(_find_cluster_extremes_shared, jaccard_threshold=_UpperCAmelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_UpperCAmelCase, _UpperCAmelCase, ), total=len(_UpperCAmelCase ), ):
extremes_list.append(_UpperCAmelCase )
return extremes_list
def _UpperCamelCase ( snake_case__, snake_case__ = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
__UpperCAmelCase : Dict = make_duplicate_clusters(_UpperCAmelCase, _UpperCAmelCase )
__UpperCAmelCase : int = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
__UpperCAmelCase : Dict = {}
__UpperCAmelCase : Any = find_extremes(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
for extremes in extremes_clusters:
for element in extremes:
__UpperCAmelCase : Any = element
__UpperCAmelCase : Dict = duplicate_indices - set(extreme_dict.keys() )
__UpperCAmelCase : Tuple = dataset.filter(lambda snake_case__, snake_case__ : idx not in remove_indices, with_indices=_UpperCAmelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__UpperCAmelCase : Optional[Any] = element["base_index"] in extreme_dict
if element["is_extreme"]:
__UpperCAmelCase : List[str] = extreme_dict[element["base_index"]]["copies"]
print(f'''Original dataset size: {len(_UpperCAmelCase )}''' )
print(f'''Number of duplicate clusters: {len(_UpperCAmelCase )}''' )
print(f'''Files in duplicate cluster: {len(_UpperCAmelCase )}''' )
print(f'''Unique files in duplicate cluster: {len(_UpperCAmelCase )}''' )
print(f'''Filtered dataset size: {len(_UpperCAmelCase )}''' )
return ds_filter, duplicate_clusters
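
# Minimal usage sketch (my addition, not part of the original module): assumes
# the helpers defined earlier in this file (`get_min_hash`, `get_tokens`,
# `DuplicationIndex`) and a `datasets` Dataset with a "content" column; the
# dataset name below is illustrative only.
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean", split="train")
#   ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)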
| 382 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
        ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 0 |
def solution(n: int = 4000000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'{solution() = }')
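
# Equivalent alternative sketch (my addition, not part of the original
# solution): every third Fibonacci number is even, so the even terms can be
# generated directly via E(k) = 4 * E(k-1) + E(k-2), skipping the odd ones.
#
#   def even_fib_sum(n: int = 4_000_000) -> int:
#       a, b, total = 2, 8, 0  # first two even Fibonacci numbers
#       while a <= n:
#           total += a
#           a, b = b, 4 * b + a
#       return total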
| 204 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0)


def main() -> None:
    """simple docstring"""
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 697 | 0 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
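
# Example invocation (my addition; the script filename is illustrative, the
# flags and defaults come from the parser above):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized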
| 171 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    """simple docstring"""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
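
# Once registered with the transformers CLI entry point, this enables e.g.:
#   transformers-cli download bert-base-uncased --cache-dir ./models --force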
| 697 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
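
# Lazy-import scaffolding: nothing below is imported eagerly. `_LazyModule`
# resolves submodules on first attribute access, and the try/except blocks
# drop entries whose optional backend (tokenizers / torch / TF) is missing.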
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 506 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
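
# Worked example: "AZ" -> ('Z' - 64) * 26**0 + ('A' - 64) * 26**1 = 26 + 26 = 52.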
| 697 | 0 |
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
        ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 84 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
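
# Minimal usage sketch (my addition, assuming 44.1 kHz mono input and the
# default feature size of 128 mel bins):
#   import numpy as np
#   feature_extractor = TvltFeatureExtractor()
#   audio = np.random.randn(44100).astype(np.float32)
#   batch = feature_extractor(audio, sampling_rate=44100, return_tensors="np")
#   # batch["audio_values"] has shape (1, 1, padded_time, 128)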
| 697 | 0 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """simple docstring"""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """simple docstring"""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """simple docstring"""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """simple docstring"""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
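
# Quick sanity check (illustrative): with 10_000 samples the estimates are
# typically within a few hundredths of the true values:
#   pi_estimator(10_000)
#   area_under_line_estimator_check(10_000)
#   pi_estimator_using_area_under_curve(10_000)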
| 351 |
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """simple docstring"""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
__lowerCAmelCase ={"a", "b", "c", "d", "e"}
__lowerCAmelCase ={"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
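
# For the sets above: the intersection {c, d, e} has 3 elements and the union
# has 8, so the printed similarity is 3 / 8 = 0.375.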
| 697 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
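
# Illustrative usage as a diffusers community pipeline (the checkpoint and the
# `custom_pipeline` id below are assumptions for this sketch):
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
#   ).to("cuda")
#   image = pipe("a photo of an astronaut", height=512, width=768).images[0]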
| 665 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
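
# Worked example: fizz_buzz(1, 7) returns "1 2 Fizz 4 Buzz Fizz 7 "
# (note the trailing space appended after every entry).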
| 697 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 692 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    """simple docstring"""

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
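
# Minimal usage sketch (direct construction shown for illustration; the usual
# public entry point for this reader is `datasets.Dataset.from_generator`):
#   def gen():
#       yield {"text": "hello"}
#       yield {"text": "world"}
#   ds = GeneratorDatasetInputStream(generator=gen).read()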
| 697 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    """simple docstring"""

    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
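
# The pattern exercised above mirrors typical TextIteratorStreamer usage in an
# application: run `model.generate` on a background thread and consume decoded
# text from the streamer as it arrives (e.g. to drive a chat UI).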
| 408 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
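
# Each iteration replaces every segment with four shorter ones, so the segment
# count grows 4x per step: the initial triangle's 3 segments become
# 3 * 4**5 = 3072 segments (3073 points) after the 5 iterations above.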
| 697 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """simple docstring"""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
| 502 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """simple docstring"""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    """simple docstring"""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: list):
    """simple docstring"""
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: list, ltp_tokenizer, bert_tokenizer):
    """simple docstring"""
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    """simple docstring"""
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
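# Illustrative walk-through (a sketch, not part of the original script): given
# BERT pieces ["我", "爱", "北", "京"] and the LTP word set {"北京"}, the helper
# `add_sub_symbol` marks the non-initial pieces of each whole word:
#     add_sub_symbol(["我", "爱", "北", "京"], {"北京"})  ->  ["我", "爱", "北", "##京"]
# `prepare_ref` then stores the index of every such "##" piece (here, 3) as the
# whole-word-masking reference for that line.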
| 697 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
                --data_dir {data_dir} \
                --output_dir {output_dir} \
                --model_name_or_path facebook/rag-sequence-base \
                --model_type rag_sequence \
                --do_train \
                --do_predict \
                --n_val -1 \
                --val_check_interval 1.0 \
                --train_batch_size 2 \
                --eval_batch_size 1 \
                --max_source_length 25 \
                --max_target_length 25 \
                --val_max_target_length 25 \
                --test_max_target_length 25 \
                --label_smoothing 0.1 \
                --dropout 0.1 \
                --attention_dropout 0.1 \
                --weight_decay 0.001 \
                --adam_epsilon 1e-08 \
                --max_grad_norm 0.1 \
                --lr_scheduler polynomial \
                --learning_rate 3e-04 \
                --num_train_epochs 1 \
                --warmup_steps 4 \
                --gradient_accumulation_steps 1 \
                --distributed-port 8787 \
                --use_dummy_dataset 1 \
                --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retriever(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
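    # Running these (a sketch): they are GPU-gated integration tests, e.g.
    #   pytest path/to/this_test_file.py -k test_finetune_gpu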
| 22 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
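# Special-token layout produced by the two helpers above for a pair (A, B),
# a sketch for orientation that mirrors the BERT convention:
#   tokens:         [CLS] A ... [SEP] B ... [SEP]
#   token_type_ids:   0   0 ...   0   1 ...   1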
| 697 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language-modeling sequences; each sample is a (token_ids, length) pair."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
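# Usage sketch (illustrative, not from the original file): the dataset is meant
# to be consumed with its own collator, e.g.
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)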
| 382 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
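# Minimal usage sketch (assuming the usual `transformers` wiring):
#   config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
#   config.hidden_size  # -> 768, the default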
| 697 | 0 |
import re
def dna(dna: str) -> str:
    """Returns the complementary strand of a DNA sequence (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
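    # Quick sanity check (a sketch): each base maps to its complement.
    assert dna("ATCG") == "TAGC"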
| 204 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solves the "rat in a maze" problem: prints a path matrix if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive backtracking step from cell (i, j), trying the four directions."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
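    # Small demo (a sketch): 1 marks a blocked cell, so the only path is down
    # then right; this prints the visited-cell matrix [1, 0] / [1, 1].
    assert solve_maze([[0, 1], [0, 0]]) is True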
| 697 | 0 |
def excel_title_to_column(column_title: str) -> int:
    """Converts an Excel-style column title (e.g. "AB") to its column number (base-26)."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
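    # Spot checks (a sketch): "A" -> 1, "AB" -> 26*1 + 2 = 28, "ZZ" -> 26*26 + 26 = 702.
    assert excel_title_to_column("AB") == 28
    assert excel_title_to_column("ZZ") == 702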
| 171 |
'''simple docstring'''
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Converts a length between metric units by the difference of their powers of ten."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
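    # Example (a sketch): kilometre and metre exponents differ by 3, so 4 km = 4 * 10**3 m.
    assert length_conversion(4, "kilometer", "meter") == 4000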
| 697 | 0 |
"""simple docstring"""
import os
def solution():
    """Returns the total of all the name scores in the file."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
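# Worked example from the Project Euler statement (for orientation): COLIN
# scores 3 + 15 + 12 + 9 + 14 = 53 and, as the 938th name in sorted order,
# contributes 938 * 53 = 49714 to the grand total.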
| 506 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 697 | 0 |
def bead_sort(sequence: list) -> list:
    """Bead sort: only works for sequences of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
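# Complexity note and one extra check (a sketch): each outer pass lets every
# "bead" fall at most one rod, so the implementation above is O(n^2).
assert bead_sort([4, 2, 2]) == [2, 2, 4]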
| 84 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]:
a_ = parent
a_ = out_indices if out_indices is not None else [4]
a_ = stage_names
a_ = out_features
a_ = backbone
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = use_pretrained_backbone
a_ = is_training
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = TimmBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
            result.feature_maps[-1].shape, (self.batch_size, model.channels[-1], 14, 14))
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
_UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = TimmBackboneModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'resnet18'
a_ = 'microsoft/resnet-18'
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
| 697 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates the train and validation `DataLoader`s for GLUE MRPC with a "bert-base-cased" tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
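# Typical launch (a sketch; the script name is whatever this file is saved as):
#   accelerate launch this_script.py --mixed_precision fp16
# or, for a quick single-process CPU smoke test:
#   python this_script.py --cpu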
| 351 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=7 , UpperCAmelCase__=3 , UpperCAmelCase__=18 , UpperCAmelCase__=30 , UpperCAmelCase__=400 , UpperCAmelCase__=True , UpperCAmelCase__=None , UpperCAmelCase__=True , ) -> List[Any]:
a_ = size if size is not None else {'height': 18, 'width': 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = LayoutLMvaImageProcessingTester(self )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'apply_ocr' ) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase__ )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# Initialize image_processing
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
# with apply_OCR = True
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
a_ = Image.open(ds[0]['file'] ).convert('RGB' )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
a_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase__ )
self.assertListEqual(encoding.boxes , UpperCAmelCase__ )
# with apply_OCR = False
a_ = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
a_ = image_processing(UpperCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 697 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
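# Note (a sketch of the pattern): replacing `sys.modules[__name__]` with a
# `_LazyModule` defers the imports declared in `_import_structure` until one of
# the exported names is first accessed.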
| 665 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
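    # Sanity check (a sketch), matching the problem statement: the 6th prime is 13.
    assert solution(6) == 13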
| 697 | 0 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        """test for * operator"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_matrix_mul(self):
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_matrix_change_component(self):
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_matrix_component(self):
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_matrix_add(self):
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_matrix_sub(self):
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
| 692 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 697 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_roformer_fast"] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 408 |
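A short usage sketch for the lazy structure above (assumes transformers is installed): the package import stays cheap, and each heavy submodule loads on first attribute access.

import transformers

config = transformers.RoFormerConfig()       # first access imports configuration_roformer
model = transformers.RoFormerModel(config)   # first access imports modeling_roformer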
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__lowerCAmelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class _snake_case ( unittest.TestCase , snake_case ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> int:
a_ = load_tool('text-question-answering' )
self.tool.setup()
a_ = load_tool('text-question-answering' , remote=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = self.remote_tool(UpperCAmelCase__ , 'What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a_ = self.tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a_ = self.remote_tool(text=UpperCAmelCase__ , question='What did Hugging Face do in April 2021?' )
self.assertEqual(UpperCAmelCase__ , 'launched the BigScience Research Workshop' )
| 697 | 0 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 502 |
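A hedged usage sketch for the encoder above; the hyperparameters are made up for illustration and are not taken from any released checkpoint.

import torch

encoder = SpectrogramNotesEncoder(
    max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
    num_layers=12, num_heads=12, d_kv=64, d_ff=2048,
    feed_forward_proj="gated-gelu", is_decoder=False,
)
tokens = torch.randint(0, 1536, (2, 128))    # (batch, seq) token ids
mask = torch.ones(2, 128, dtype=torch.long)  # all positions are real tokens
hidden, out_mask = encoder(tokens, mask)     # hidden: (2, 128, 768)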
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase ={"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 697 | 0 |
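The same deferred-import mechanics in miniature: a hypothetical, trimmed-down stand-in for _LazyModule, shown only to illustrate how attribute access triggers the real import.

import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value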
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a = {0: '''batch'''}
_a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_a = {0: '''batch''', 1: '''decoder_sequence'''}
_a = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase__ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_a , _a = self.num_layers
for i in range(UpperCAmelCase__ ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
_a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_a = super().outputs
else:
_a = super(UpperCAmelCase__ , self ).outputs
if self.use_past:
_a , _a = self.num_layers
for i in range(UpperCAmelCase__ ):
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
_a = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
"""simple docstring"""
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Generate decoder inputs
_a = seq_length if not self.use_past else 1
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
_a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_a = dict(**UpperCAmelCase__ , **UpperCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
_a = common_inputs['''decoder_input_ids'''].shape[1]
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = decoder_seq_length + 3
_a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_a = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(UpperCAmelCase__ , UpperCAmelCase__ )] , dim=1 )
_a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_a , _a = self.num_layers
_a = min(UpperCAmelCase__ , UpperCAmelCase__ )
_a = max(UpperCAmelCase__ , UpperCAmelCase__ ) - min_num_layers
_a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(UpperCAmelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCAmelCase__ ),
torch.zeros(UpperCAmelCase__ ),
torch.zeros(UpperCAmelCase__ ),
torch.zeros(UpperCAmelCase__ ),
) )
# TODO: test this.
_a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(UpperCAmelCase__ , UpperCAmelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase__ ), torch.zeros(UpperCAmelCase__ )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
"""simple docstring"""
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_a , _a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a , _a = self.num_layers
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = common_inputs['''attention_mask'''].dtype
_a = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(UpperCAmelCase__ , UpperCAmelCase__ , dtype=UpperCAmelCase__ )] , dim=1 )
_a = [
(torch.zeros(UpperCAmelCase__ ), torch.zeros(UpperCAmelCase__ )) for _ in range(UpperCAmelCase__ )
]
return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
"""simple docstring"""
_a = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = tokenizer.num_special_tokens_to_add(UpperCAmelCase__ )
_a = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase__ )
# Generate dummy inputs according to compute batch and sequence
_a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
_a = dict(tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ ) )
return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
elif self.task == "causal-lm":
_a = self._generate_dummy_inputs_for_causal_lm(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
else:
_a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_a = super()._flatten_past_key_values_(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
else:
_a = super(UpperCAmelCase__ , self )._flatten_past_key_values_(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
| 22 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 697 | 0 |
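A quick usage sketch for the pair of classes above: build a config, wrap it in the ONNX config, and inspect the export spec.

config = ViTConfig(image_size=224, patch_size=16)
onnx_config = ViTOnnxConfig(config)
print(onnx_config.inputs)                # pixel_values with dynamic batch/channel/spatial axes
print(onnx_config.atol_for_validation)   # 1e-04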
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 382 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Returns the number of ways a row of the given length can be filled."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 697 | 0 |
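A tiny brute-force cross-check of the dynamic programme above (Project Euler 114: rows built from blocks of length at least 3 separated by at least one empty square; length 7 gives 17 ways):

from functools import lru_cache

def brute_force(length: int) -> int:
    @lru_cache(maxsize=None)
    def count(remaining: int) -> int:
        # negative `remaining` means the last block ended flush with the row: one way
        total = 1  # the all-empty tail
        for start in range(remaining):
            for block in range(3, remaining - start + 1):
                total += count(remaining - start - block - 1)  # block, then one gap
        return total

    return count(length)

assert brute_force(7) == 17 == solution(7)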
def upper(word: str) -> str:
    """
    Convert an entire string to ASCII uppercase.

    >>> upper("wow")
    'WOW'
    >>> upper("Hello World")
    'HELLO WORLD'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 204 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """
    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 0)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print('Truth Table of NOR Gate:')
    print('| Input 1 | Input 2 | Output |')
    print(f'''|    0    |    0    |   {nor_gate(0, 0)}    |''')
    print(f'''|    0    |    1    |   {nor_gate(0, 1)}    |''')
    print(f'''|    1    |    0    |   {nor_gate(1, 0)}    |''')
    print(f'''|    1    |    1    |   {nor_gate(1, 1)}    |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 697 | 0 |
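NOR is functionally complete; a quick sketch deriving NOT, OR and AND from the nor_gate above:

def not_gate(a: int) -> int:
    return nor_gate(a, a)

def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))

def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))

assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]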
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''', type=str, default='''biencoder-nq-dev.json''', help='''Path to raw DPR training data''',
    )
    parser.add_argument(
        '''--evaluation_set''', type=str, help='''where to store parsed evaluation_set file''',
    )
    parser.add_argument(
        '''--gold_data_path''', type=str, help='''where to store parsed gold_data_path file''',
    )
    args = parser.parse_args()

    with open(args.src_path, '''r''') as src_file, open(args.evaluation_set, '''w''') as eval_file, open(
        args.gold_data_path, '''w'''
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''')
            gold_file.write('''\t'''.join(contexts) + '''\n''')
if __name__ == "__main__":
main()
| 171 |
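For orientation, a hedged sketch of the record shape the script above consumes, one heavily abridged entry in the style of biencoder-nq-dev.json (field values are illustrative):

dpr_record = {
    "question": "who sings does he love me with reba",
    "positive_ctxs": [{"title": "Does He Love You", "text": "..."}],
}
# evaluation_set line written:  who sings does he love me with reba
# gold_data_path line written:  Does He Love You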
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models')
        download_parser.add_argument(
            '--force', action='store_true', help='Force the model to be download even if already in cache-dir')
        download_parser.add_argument(
            '--trust-remote-code', action='store_true', help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine', )
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
| 697 | 0 |
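Usage sketch: the class above backs the transformers-cli download subcommand, and the shell invocation below is roughly equivalent to the two from_pretrained calls.

#   transformers-cli download --cache-dir /tmp/models bert-base-uncased
from transformers import AutoModel, AutoTokenizer

AutoModel.from_pretrained("bert-base-uncased", cache_dir="/tmp/models")
AutoTokenizer.from_pretrained("bert-base-uncased", cache_dir="/tmp/models")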
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 506 |
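Quick usage sketch for the classes above: sum every node reachable from the root.

tree = Node(10)
tree.left = Node(5)
tree.right = Node(-3)
assert next(iter(BinaryTreeNodeSum(tree))) == 12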
'''simple docstring'''
def get_column_number(column_title: str) -> int:
    """
    Given a string that represents an Excel-style column title,
    return its column number.

    >>> get_column_number("A")
    1
    >>> get_column_number("AB")
    28
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 697 | 0 |
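For symmetry, a sketch of the inverse mapping (column number back to title), plus a round-trip check:

def get_column_title(number: int) -> str:
    assert number >= 1
    title = ""
    while number > 0:
        number, remainder = divmod(number - 1, 26)
        title = chr(65 + remainder) + title
    return title

assert get_column_title(28) == "AB" and get_column_number("AB") == 28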
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        # alternate between odd- and even-indexed neighbour pairs each pass
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 84 |
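A quick randomized check of the sort above (n passes always suffice for n elements):

import random

data = [random.randint(0, 99) for _ in range(50)]
assert odd_even_transposition(list(data)) == sorted(data)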
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 697 | 0 |
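A hedged usage sketch for the extractor above: one second of silence in, padded log-mel patches and a patch-level attention mask out.

import numpy as np

extractor = TvltFeatureExtractor()
audio = np.zeros(44100, dtype=np.float32)  # 1 s at the default sampling rate
features = extractor(audio, sampling_rate=44100, return_attention_mask=True)
print(features["audio_values"].shape, features["audio_mask"].shape)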
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
UpperCAmelCase__ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.intaa if False else torch.int64)
        return batch
def main():
    # See all possible arguments in src/transformers/training_args.py,
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , _UpperCAmelCase , _UpperCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__a : int = training_args.get_process_log_level()
logger.setLevel(_UpperCAmelCase )
datasets.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.set_verbosity(_UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__a : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__a : int = {}
if data_args.train_file is not None:
__a : List[Any] = data_args.train_file
if data_args.validation_file is not None:
__a : int = data_args.validation_file
__a : Dict = data_args.train_file.split("." )[-1]
__a : Any = load_dataset(
_UpperCAmelCase , data_files=_UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__a : Any = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__a : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__a : Dict = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__a : Union[str, Any] = [f'ending{i}' for i in range(4 )]
__a : Dict = "sent1"
__a : Optional[Any] = "sent2"
if data_args.max_seq_length is None:
__a : int = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
__a : Optional[int] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
__a : Tuple = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__A ):
__a : Tuple = [[context] * 4 for context in examples[context_name]]
__a : List[str] = examples[question_header_name]
__a : str = [
[f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(_UpperCAmelCase )
]
# Flatten out
__a : Any = list(chain(*_UpperCAmelCase ) )
__a : Tuple = list(chain(*_UpperCAmelCase ) )
# Tokenize
__a : Tuple = tokenizer(
_UpperCAmelCase , _UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
__a : Optional[Any] = raw_datasets["train"]
if data_args.max_train_samples is not None:
__a : int = min(len(_UpperCAmelCase ) , data_args.max_train_samples )
__a : Optional[Any] = train_dataset.select(range(_UpperCAmelCase ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
__a : List[Any] = train_dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
__a : List[Any] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
__a : int = min(len(_UpperCAmelCase ) , data_args.max_eval_samples )
__a : Optional[Any] = eval_dataset.select(range(_UpperCAmelCase ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
__a : Optional[int] = eval_dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__a : int = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__A ):
__a , __a : Dict = eval_predictions
__a : Optional[int] = np.argmax(_UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__a : Optional[int] = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , compute_metrics=_UpperCAmelCase , )
# Training
if training_args.do_train:
__a : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
__a : Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a : Tuple = last_checkpoint
__a : Dict = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__a : Optional[Any] = train_result.metrics
__a : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCAmelCase )
)
__a : Dict = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics("train" , _UpperCAmelCase )
trainer.save_metrics("train" , _UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__a : List[Any] = trainer.evaluate()
__a : List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCAmelCase )
__a : List[Any] = min(_UpperCAmelCase , len(_UpperCAmelCase ) )
trainer.log_metrics("eval" , _UpperCAmelCase )
trainer.save_metrics("eval" , _UpperCAmelCase )
__a : Any = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCAmelCase )
else:
trainer.create_model_card(**_UpperCAmelCase )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 351 |
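The core preprocessing trick in the script above, in miniature: each SWAG example becomes four (context, ending) pairs that are tokenized flat and regrouped to shape (4, seq_len) for the multiple-choice head.

example = {
    "sent1": "She opens the door.",
    "sent2": "She",
    "ending0": "walks in.", "ending1": "flies away.",
    "ending2": "sings.", "ending3": "melts.",
}
first_sentences = [example["sent1"]] * 4
second_sentences = [f"{example['sent2']} {example[f'ending{i}']}" for i in range(4)]
# tokenizer(first_sentences, second_sentences), then regrouping in fours, yields
# one encoded example per question, with the label selecting the correct ending.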
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the similarity between two sets (or lists/tuples) as
    |intersection| / |union|; with alternative_union, the denominator
    is |set_a| + |set_b| instead.

    >>> jaccard_similarity({'a', 'b', 'c', 'd', 'e'}, {'c', 'd', 'e', 'f', 'h', 'i'})
    0.375
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 697 | 0 |
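A worked example for jaccard_similarity above: the intersection {c, d, e} has 3 elements and the union has 8, so the score is 0.375.

assert jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}) == 0.375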
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """vit"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
| 665 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """
    Plays FizzBuzz, returning the concatenated results as one string.

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            'starting number must be\n an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 697 | 0 |
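For comparison, an equivalent, more compact formulation of the same game, returning one entry per number:

def fizz_buzz_list(iterations: int) -> list:
    return ["Fizz" * (n % 3 == 0) + "Buzz" * (n % 5 == 0) or str(n) for n in range(1, iterations + 1)]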