"""MaskFormer model configuration."""
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig

MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" MaskFormer. Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "MaskFormerConfig":
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
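# A minimal usage sketch, assuming the released `transformers` package (where the
# class above is public as `MaskFormerConfig`); the Swin hyperparameters below are
# illustrative choices, not required values.
if __name__ == "__main__":
    from transformers import MaskFormerConfig, SwinConfig

    backbone = SwinConfig(image_size=384, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32])
    config = MaskFormerConfig(backbone_config=backbone)
    # "hidden_size" is aliased to "mask_feature_size" through `attribute_map`
    assert config.hidden_size == config.mask_feature_size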
"""
Similarity search: for each query vector, find the dataset vector with the
smallest Euclidean distance.
"""
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For every vector in `value_array`, return its nearest neighbour in `dataset` and the distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # keep the closest dataset vector seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
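# Worked example with illustrative data: the query (0.9, 1.1) is nearest to the
# dataset row (1.0, 1.0), at Euclidean distance sqrt(0.02) ~= 0.1414.
if __name__ == "__main__":
    example_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    example_queries = np.array([[0.9, 1.1]])
    print(similarity_search(example_dataset, example_queries))
    # -> [[[1.0, 1.0], 0.1414...]]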
"""SQuAD question-answering dataset with on-disk feature caching."""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
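# A hedged usage sketch: in released `transformers` this module is public as
# `SquadDataset` / `SquadDataTrainingArguments`. The checkpoint name is real;
# "path/to/squad" is a placeholder directory expected to contain the SQuAD JSON files.
if __name__ == "__main__":
    from transformers import AutoTokenizer, SquadDataset, SquadDataTrainingArguments

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")
    train_dataset = SquadDataset(args, tokenizer=tokenizer, mode="train")
    print(len(train_dataset), train_dataset[0]["input_ids"].shape)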
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
"""
Greedy activity selection: pick the maximum number of non-overlapping activities,
assuming the activities are sorted by finish time.
"""


def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
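# Why the greedy choice works: with activities sorted by finish time, taking the
# earliest-finishing compatible activity always leaves the largest possible window
# for the remaining ones. For the inputs above the selected indices are 0, 1, 3, 4.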
"""Download a public Instagram video or IGTV clip via downloadgram.net."""
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Resolve the direct video source for `url` and return the raw video bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
"""Autoformer model configuration."""
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
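# A minimal usage sketch, assuming the released `transformers` package (where this
# class is public as `AutoformerConfig`); the hyperparameter values are illustrative.
if __name__ == "__main__":
    from transformers import AutoformerConfig

    config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
    # `hidden_size` is aliased to `d_model` via `attribute_map`
    print(config.hidden_size, config.feature_size)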
"""
Sum the perimeters of all almost-equilateral triangles (sides a, a, a +/- 1) with
integer area, up to a maximum perimeter.
"""


def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Pell-like recurrence generating the next valid triangle
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
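# Worked check: the recurrence yields perimeters 16, 50, 196, ... -- the
# almost-equilateral triangles (5, 5, 6), (17, 17, 16), (65, 65, 66) -- so only
# the first two fit under a limit of 100.
if __name__ == "__main__":
    assert solution(100) == 16 + 50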
"""Smoke test: launch the accelerate test script on a TPU through torch_xla's xla_spawn."""
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class TPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
"""Utility that sorts the entries of `_import_structure` in diffusers `__init__.py` files."""
import argparse
import os
import re

PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
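# Illustrative example for `sort_objects_in_import` (hypothetical module/object names):
#
#   >>> sort_objects_in_import('_import_structure["models.bert"] = ["load_tf_weights_in_bert", "BertModel", "BERT_CONSTANT"]')
#   '_import_structure["models.bert"] = ["BERT_CONSTANT", "BertModel", "load_tf_weights_in_bert"]'
#
# Constants sort first, then classes, then functions, ignoring underscores.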
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
"""Adler-32 checksum (as specified in RFC 1950), a fast, weak checksum used by zlib."""

MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Return the Adler-32 checksum of `plain_text` as an integer."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
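# Usage check: the reference value from the well-known Adler-32 worked example
# for the string "Wikipedia" is 0x11E60398 (300286872).
if __name__ == "__main__":
    assert adler32("Wikipedia") == 300286872
    print(hex(adler32("Wikipedia")))  # 0x11e60398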
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Any = tmp_path / 'cache'
UpperCAmelCase : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : Any = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Any = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCAmelCase : int = features.copy() if features else default_expected_features
UpperCAmelCase : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Union[str, Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : List[Any] = tmp_path / 'cache'
UpperCAmelCase : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCAmelCase : List[str] = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , split=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase : Union[str, Any] = parquet_path
elif issubclass(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase : Dict = [parquet_path]
UpperCAmelCase : Any = tmp_path / 'cache'
UpperCAmelCase : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCAmelCase : int = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=("train",) ):
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for split in splits:
UpperCAmelCase : Any = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : str = tmp_path / 'cache'
UpperCAmelCase : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : Any = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[Any] = tmp_path / 'cache'
UpperCAmelCase : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCAmelCase : List[Any] = features.copy() if features else default_expected_features
UpperCAmelCase : Tuple = (
Features({feature: Value(SCREAMING_SNAKE_CASE_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Optional[int] = ParquetDatasetReader({'train': parquet_path} , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if split:
UpperCAmelCase : Optional[int] = {split: parquet_path}
else:
UpperCAmelCase : Optional[Any] = 'train'
UpperCAmelCase : Optional[Any] = {'train': parquet_path, 'test': parquet_path}
UpperCAmelCase : Union[str, Any] = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
UpperCAmelCase : Union[str, Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCAmelCase : Tuple = pq.ParquetFile(tmp_path / 'foo.parquet' )
UpperCAmelCase : Optional[Any] = pf.read()
assert dataset.data.table == output_table
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = str(shared_datadir / 'test_image_rgb.jpg' )
UpperCAmelCase : Any = {'image': [image_path]}
UpperCAmelCase : Optional[int] = Features({'image': Image()} )
UpperCAmelCase : Tuple = Dataset.from_dict(SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase : Optional[int] = ParquetDatasetWriter(SCREAMING_SNAKE_CASE_ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
UpperCAmelCase : Optional[int] = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
UpperCAmelCase : Tuple = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=SCREAMING_SNAKE_CASE_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
assert get_writer_batch_size(SCREAMING_SNAKE_CASE_ ) == expected
"""Character n-grams of a sentence."""


def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """
    Create all contiguous character n-grams of `sentence`.

    >>> create_ngram("I am a sentence", 2)
    ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase : Optional[int] = mock.Mock()
UpperCAmelCase : Union[str, Any] = 500
UpperCAmelCase : int = {}
UpperCAmelCase : Any = HTTPError
UpperCAmelCase : Optional[Any] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=lowerCAmelCase__ ) as mock_head:
UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class A_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase_ ( cls : List[str] ) -> List[Any]:
UpperCAmelCase : str = TOKEN
HfFolder.save_token(lowerCAmelCase__ )
@classmethod
def UpperCAmelCase_ ( cls : Optional[int] ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def UpperCAmelCase_ ( self : Any ) -> Any:
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(lowercase__ )
        feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='test-feature-extractor' , push_to_hub=True , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(lowercase__ )
        feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=True , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(lowercase__ )
        feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 701 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
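# Biquad (second-order IIR) filter factories. Each builder computes the
# denominator coefficients a0..a2 and numerator coefficients b0..b2 for the
# requested frequency response -- the formulas follow Robert Bristow-Johnson's
# widely used "Audio EQ Cookbook" -- and loads them into a 2nd-order IIRFilter.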
def make_lowpass(frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_highpass(frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def make_bandpass(frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_allpass(frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def make_peak(frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_lowshelf(frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def make_highshelf(frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
| 695 | 0 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
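# Smoke tests for the VQModel autoencoder: input/output shape checks, config
# round-tripping, and a numerical regression check against the tiny
# 'fusing/vqgan-dummy' checkpoint.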
class A_ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
'''simple docstring'''
    model_class = VQModel
    main_input_name = """sample"""
@property
    def dummy_input( self : List[str] , sizes : List[str]=(32, 32) ) -> Tuple:
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
@property
def UpperCAmelCase_ ( self : Tuple ) -> str:
return (3, 32, 32)
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
return (3, 32, 32)
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
        init_dict = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 3,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
pass
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
pass
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
        model , loading_info = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
        model = VQModel.from_pretrained('fusing/vqgan-dummy' )
        model.to(torch_device ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1E-3 ) )
| 702 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
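# LRU cache built from a deque plus a set: the deque keeps keys in recency
# order (most recent on the left), the set gives O(1) membership checks, and
# the right end of the deque is evicted once _MAX_CAPACITY is reached.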
class LRUCache(Generic[T] ):
    '''simple docstring'''
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self : List[Any] , n : int ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self : List[str] , x : T ) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self : Dict ) -> None:
        for k in self.dq_store:
            print(k )
    def __repr__( self : Union[str, Any] ) -> str:
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results( output_dir ):
    results = {}
    path = os.path.join(output_dir , 'all_results.json' )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F"""can't find {path}""" )
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class A_ ( TestCasePlus ):
'''simple docstring'''
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys , 'argv' , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
import xla_spawn
        testargs = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(sys , 'argv' , testargs ):
xla_spawn.main()
| 703 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
    def __init__( self : List[str] , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
| 695 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
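# Thin launcher around torch_xla multiprocessing: parse the target training
# script and its arguments, import the script as a module, patch sys.argv,
# and spawn one process per requested TPU core via xmp.spawn.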
def parse_args():
    parser = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
        'training_script' , type=str , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the spawned processes see the script's own arguments
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 704 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
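# Speech-to-text tool wrapping openai/whisper-base behind the PipelineTool
# interface: encode extracts the log-mel input features, forward runs
# generation, and decode turns the generated token ids back into text.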
class A_ ( PipelineTool ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """openai/whisper-base"""
UpperCAmelCase_ : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCAmelCase_ : Dict = """transcriber"""
UpperCAmelCase_ : int = WhisperProcessor
UpperCAmelCase_ : Optional[int] = WhisperForConditionalGeneration
UpperCAmelCase_ : Dict = ["""audio"""]
UpperCAmelCase_ : Optional[int] = ["""text"""]
    def encode( self : Tuple , audio : str ) -> Optional[int]:
        return self.pre_processor(audio , return_tensors='pt' ).input_features
    def forward( self : Tuple , inputs : int ) -> List[str]:
        return self.model.generate(inputs=inputs )
    def decode( self : str , outputs : List[Any] ) -> List[str]:
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
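# Binary-tree traversals on a small dataclass node: depth-first (pre-, in-,
# post-order), breadth-first (level order), per-level collection in either
# direction, and a zig-zag traversal that alternates direction per level.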
@dataclass
class Node:
    '''simple docstring'''
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree():
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder( root ):
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder( root ):
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder( root ):
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height( root ):
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order( root ):
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right( root , level ):
    output: list[Any] = []
    def populate_output(root , level ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left( root , level ):
    output: list[Any] = []
    def populate_output(root , level ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag( root ):
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main():  # Main function for testing.
    root = make_tree()
    print(F"""In-order Traversal: {inorder(root )}""" )
    print(F"""Pre-order Traversal: {preorder(root )}""" )
    print(F"""Post-order Traversal: {postorder(root )}""" , '\n' )
    print(F"""Height of Tree: {height(root )}""" , '\n' )
    print('Complete Level Order Traversal: ' )
    print(level_order(root ) , '\n' )
    print('Level-wise order Traversal: ' )
    for level in range(1 , height(root ) + 1 ):
        print(F"""Level {level}:""" , get_nodes_from_left_to_right(root , level=level ) )
    print('\nZigZag order Traversal: ' )
    print(zigzag(root ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 705 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
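# Default sets of token ids suppressed at generation time; these appear to be
# the non-speech-token lists used by the multilingual and the English-only
# Whisper checkpoints, respectively.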
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( PretrainedConfig ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """whisper"""
UpperCAmelCase_ : Tuple = ["""past_key_values"""]
UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self : str , vocab_size=51_865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1_536 , encoder_ffn_dim=1_536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=50_257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1_500 , max_target_positions=448 , pad_token_id=50_256 , bos_token_id=50_256 , eos_token_id=50_256 , suppress_tokens=None , begin_suppress_tokens=[220, 50_256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class A_ ( OnnxSeq2SeqConfigWithPast ):
'''simple docstring'''
@property
    def inputs( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
return common_inputs
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
UpperCAmelCase : Tuple = OrderedDict()
UpperCAmelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
UpperCAmelCase : Optional[Any] = encoder_inputs['input_features'].shape[2]
UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Dict = encoder_inputs.pop('input_features' )
UpperCAmelCase : List[str] = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
    def atol_for_validation( self : Dict ) -> float:
return 1E-3
| 695 | 0 |
'''simple docstring'''
import numpy as np
from PIL import Image
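# Naive 2D pooling over square matrices: a size x size window slides across
# the input with the given stride; max-pooling keeps the window maximum and
# average-pooling the (truncated) window mean.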
def maxpooling( arr , size , stride ):
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape) )
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling( arr , size , stride ):
    arr = np.array(arr )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix' )
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape) )
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
lowercase__ = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 706 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def default_command_parser( parser , parents ):
    parser = parser.add_parser('default' , parents=parents , help=lowercase__ , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '--config_file' , default=default_json_config_file , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=str , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command( args ):
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(F"""accelerate configuration saved at {config_file}""" )
| 695 | 0 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
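# Gauss's Easter algorithm: combines the 19-year Metonic lunar cycle with
# Julian/Gregorian leap-day corrections to find the Paschal full moon, then
# steps forward from March 21 to the following Sunday.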
def gauss_easter( year ):
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
lowercase__ = "will be" if year > datetime.now().year else "was"
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
| 707 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
lowercase__ = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor( LayoutLMv2ImageProcessor ):
    '''simple docstring'''
    def __init__( self : List[Any] , *args : str , **kwargs : Union[str, Any] ) -> None:
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 695 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self : List[str] ) -> List[str]:
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self : Any , **kwargs : Any ) -> List[Any]:
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self : Tuple , **kwargs : Any ) -> Tuple:
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self : Optional[Any] ) -> Tuple:
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self : List[str] ) -> int:
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> str:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self : int ) -> Tuple:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
processor()
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 708 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample:
    '''simple docstring'''
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures:
    '''simple docstring'''
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( Dataset ):
    '''simple docstring'''
    features: List[InputFeatures]
    def __init__( self : List[str] , data_dir : str , tokenizer : PreTrainedTokenizer , task : str , max_seq_length : Optional[int] = None , overwrite_cache = False , evaluate : bool = False , ) -> None:
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir , 'cached_{}_{}_{}_{}'.format(
                'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
        label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(f"""Loading features from cached file {cached_features_file}""" )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(f"""Creating features from dataset file at {data_dir}""" )
                examples = (
                    processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                )
                logger.info('Training examples: %s' , len(examples ) )
                self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                logger.info('Saving features into cached file %s' , cached_features_file )
                torch.save(self.features , cached_features_file )
def __len__( self : Union[str, Any] ) -> str:
return len(self.features )
def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
'''simple docstring'''
    features: List[InputFeatures]
    def __init__( self : Tuple , data_dir : str , tokenizer : PreTrainedTokenizer , task : str , max_seq_length : Optional[int] = 128 , overwrite_cache = False , evaluate : bool = False , ) -> None:
        processor = hans_processors[task]()
        label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
        self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
        self.dataset = tf.data.Dataset.from_generator(
lowercase_ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.dataset
def __len__( self : Tuple ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return self.label_list
class HansProcessor( DataProcessor ):
    '''simple docstring'''
    def get_train_examples( self : int , data_dir : Optional[int] ) -> Any:
        return self._create_examples(self._read_tsv(os.path.join(data_dir , 'heuristics_train_set.txt' ) ) , 'train' )
    def get_dev_examples( self : Optional[int] , data_dir : Dict ) -> List[str]:
        return self._create_examples(self._read_tsv(os.path.join(data_dir , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
    def get_labels( self : str ) -> Optional[int]:
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self : Optional[int] , lines : Tuple , set_type : str ) -> Dict:
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith('ex' ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples , label_list , max_length , tokenizer , ):
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='convert examples to features' ):
        if ex_index % 1_00_00 == 0:
            logger.info('Writing example %d' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='max_length' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info('*** Example ***' )
        logger.info(F"""guid: {example}""" )
        logger.info(F"""features: {features[i]}""" )
    return features
lowercase__ = {
"hans": 3,
}
hans_processors = {
"hans": HansProcessor,
}
| 695 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self : Optional[int] , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self : Union[str, Any] ) -> List[str]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class A_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self : Dict ) -> int:
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict( self : Tuple ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
pass
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCAmelCase : int = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def UpperCAmelCase_ ( self : str ) -> List[str]:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 709 |
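For reference, a de-obfuscated usage sketch of the processor exercised by the tests above, assuming transformers and Pillow are installed; the 18-pixel sizes mirror the tester defaults shown in the dict:

import numpy as np
from PIL import Image
from transformers import LevitImageProcessor

processor = LevitImageProcessor(size={'shortest_edge': 18}, crop_size={'height': 18, 'width': 18})
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors='pt').pixel_values
assert pixel_values.shape == (1, 3, 18, 18)  # (batch, channels, crop height, crop width)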
'''simple docstring'''
def solution( n = 1000 ):
    prev_numerator , prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 0 |
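The recurrence above walks the sqrt(2) convergents 3/2, 7/5, 17/12, ...; the eighth expansion, 1393/985, is the first whose numerator has more digits than its denominator, and the default call counts 153 such expansions among the first thousand (Project Euler 57). A quick spot check against the reconstructed routine:

assert solution(8) == 1  # 1393 has four digits, 985 has three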
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def UpperCamelCase( ):
    env_level_str = os.getenv('DATASETS_VERBOSITY' , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def UpperCamelCase( ):
return __name__.split('.' )[0]
def UpperCamelCase( ):
return logging.getLogger(_get_library_name() )
def UpperCamelCase( ):
    library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def UpperCamelCase( ):
    library_root_logger = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def UpperCamelCase( name = None ):
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )
def UpperCamelCase( ):
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity( verbosity ):
    _get_library_root_logger().setLevel(verbosity )
def UpperCamelCase( ):
    return set_verbosity(INFO )
def UpperCamelCase( ):
    return set_verbosity(WARNING )
def UpperCamelCase( ):
    return set_verbosity(DEBUG )
def UpperCamelCase( ):
    return set_verbosity(ERROR )
def UpperCamelCase( ):
    _get_library_root_logger().propagate = False
def UpperCamelCase( ):
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm :
'''simple docstring'''
def __init__( self : List[str] , *lowercase_ : List[Any] , **lowercase_ : Dict ) -> Union[str, Any]: # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
def __iter__( self : str ) -> Optional[Any]:
return iter(self._iterator )
def __getattr__( self : Any , lowercase_ : Union[str, Any] ) -> int:
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Tuple ) -> str:
return self
def __exit__( self : Optional[int] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : int ) -> List[str]:
return
_tqdm_active = True
class _tqdm_cls :
'''simple docstring'''
    def __call__( self : Union[str, Any] , *args , disable=False , **kwargs ) -> Union[str, Any]:
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def UpperCAmelCase_ ( self : int , *args , **kwargs ) -> int:
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowercase__ : List[str] = _tqdm_cls()
def UpperCamelCase( ):
global _tqdm_active
return bool(_tqdm_active )
def UpperCamelCase( ):
global _tqdm_active
    _tqdm_active = True
def UpperCamelCase( ):
global _tqdm_active
    _tqdm_active = False
| 710 |
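A short usage sketch of the helpers above; the import path and the convenience-setter names are assumed to mirror the public datasets API that this module implements:

from datasets.utils import logging as ds_logging

ds_logging.set_verbosity_info()            # convenience wrapper around set_verbosity(INFO)
logger = ds_logging.get_logger(__name__)   # child of the library root logger
logger.info('visible at INFO and below')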
'''simple docstring'''
def solution( power = 1000 ):
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 695 | 0 |
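Spot check for the digit-sum routine above (Project Euler 16): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.

assert solution(15) == 26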
'''simple docstring'''
from __future__ import annotations
from typing import Any
class A_ :
'''simple docstring'''
    def __init__( self , num_of_nodes : int ) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge( self , u_node : int , v_node : int , weight : int ) -> None:
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node : int ) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node : int ) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size : list[int] , u_node : int , v_node : int ) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def UpperCamelCase( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
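Usage sketch for the reconstructed class above (still named A_ by the corpus transform); on a weighted triangle, Boruvka keeps the two lightest edges:

graph = A_(3)
graph.add_edge(0, 1, 1)
graph.add_edge(1, 2, 2)
graph.add_edge(0, 2, 3)
graph.boruvka()  # prints the two added edges and a total weight of 3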
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """blip_2_vision_model"""
def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : str = patch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""
def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : Dict = position_embedding_type
UpperCAmelCase : Any = cross_attention_frequency
UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip-2"""
UpperCAmelCase_ : Any = True
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
super().__init__(**lowercase_ )
if vision_config is None:
UpperCAmelCase : Union[str, Any] = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
UpperCAmelCase : str = BlipaVisionConfig(**lowercase_ )
UpperCAmelCase : str = BlipaQFormerConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
UpperCAmelCase : Tuple = num_query_tokens
UpperCAmelCase : Tuple = self.vision_config.hidden_size
UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Union[str, Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.02
@classmethod
def UpperCAmelCase_ ( cls : Dict , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : List[str] = self.text_config.to_dict()
UpperCAmelCase : str = self.__class__.model_type
return output
| 695 | 0 |
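The same composition expressed with the public transformers class names (the dump's Blipa* spellings are digit-collapsed forms of Blip2*):

from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

config = Blip2Config.from_vision_qformer_text_configs(
    vision_config=Blip2VisionConfig(),
    qformer_config=Blip2QFormerConfig(),
    text_config=OPTConfig(),
    num_query_tokens=32,
)
assert config.qformer_config.encoder_hidden_size == 1408  # default wired to the ViT hidden size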
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope='session' )
def UpperCamelCase( UpperCAmelCase_ ):
    path = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
    data = bytes(FILE_CONTENT , 'utf-8' )
    with zstd.open(path , 'wb' ) as f:
        f.write(data )
return path
@pytest.fixture
def UpperCamelCase( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , 'w' ) as f:
        f.write(FILE_CONTENT )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def UpperCamelCase( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / 'cache'
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def UpperCamelCase( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    custom_cache_dir = 'custom_cache'
    custom_extracted_dir = 'custom_extracted_dir'
    custom_extracted_path = tmp_path / 'custom_extracted_path'
    if default_extracted:
        expected = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , custom_extracted_dir )
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def UpperCamelCase( text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def UpperCamelCase( tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = './__missing_file__.txt'
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def UpperCamelCase( tmpfs_file ):
    output_file = get_from_cache(f"""tmp://{tmpfs_file}""" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def UpperCamelCase( ):
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def UpperCamelCase( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('https://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def UpperCamelCase( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('ftp://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def UpperCamelCase( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.html'
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('s3://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('s3://huggingface.co' )
| 712 |
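A runnable miniature of the extraction fixtures above, assuming datasets is installed; all paths are temporary:

import gzip, os, tempfile
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

tmp = tempfile.mkdtemp()
archive = os.path.join(tmp, 'data.txt.gz')
with gzip.open(archive, 'wt') as f:
    f.write('Text data.')

extracted = cached_path(archive, download_config=DownloadConfig(cache_dir=tmp, extract_compressed_file=True))
assert open(extracted).read() == 'Text data.'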
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict ) -> Tuple:
return FSMTTokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> Tuple:
        model = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(torch_device )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def UpperCAmelCase_ ( self : List[str] , pair : str , min_bleu_score : float ) -> Optional[int]:
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences , return_tensors='pt' , truncation=True , padding='longest' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['bleu'] , min_bleu_score )
| 695 | 0 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCamelCase( tmpdir ):
    lock1 = FileLock(str(tmpdir / 'foo.lock' ) )
    lock2 = FileLock(str(tmpdir / 'foo.lock' ) )
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock2.acquire(timeout )
    assert time.time() - _start > timeout
def UpperCamelCase( tmpdir ):
    filename = 'a' * 1000 + '.lock'
    lock1 = FileLock(str(tmpdir / filename ) )
    assert lock1._lock_file.endswith('.lock' )
    assert not lock1._lock_file.endswith(filename )
    assert len(os.path.basename(lock1._lock_file ) ) <= 255
    lock2 = FileLock(tmpdir / filename )
    with lock1.acquire():
        with pytest.raises(Timeout ):
            lock2.acquire(0 )
| 713 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 695 | 0 |
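The same composition pattern as the BLIP-2 record earlier, using the public transformers class names (the dump's PixaStruct* spellings stand in for Pix2Struct*):

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

config = Pix2StructConfig.from_text_vision_configs(
    text_config=Pix2StructTextConfig(),
    vision_config=Pix2StructVisionConfig(),
)
assert config.pad_token_id == config.text_config.pad_token_id  # token ids are mirrored from the text config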
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : List[str] = 3.0
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def UpperCAmelCase_ ( self : str ) -> str:
        scaler_handler = GradScalerKwargs(init_scale=1_024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_000 )
self.assertEqual(scaler._enabled , _UpperCAmelCase )
@require_multi_gpu
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase : str = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 714 |
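Plain usage of the two handlers the tests above assert on; note the fp16 scaler path needs a CUDA device, as the @require_cuda marker indicates:

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs

handlers = [
    GradScalerKwargs(init_scale=1024, growth_factor=2),
    DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True),
]
accelerator = Accelerator(mixed_precision='fp16', kwargs_handlers=handlers)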
'''simple docstring'''
import baseaa
def baseaa_encode( string ):
    return baseaa.baaencode(string.encode('utf-8' ) )
def baseaa_decode( encoded ):
    return baseaa.baadecode(encoded ).decode('utf-8' )
if __name__ == "__main__":
    test = "Hello World!"
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
| 695 | 0 |
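The collapsed baseaa/baaencode names above hide stdlib digits (any of b16/b32/b64/b85 fits the pattern), so a runnable stand-in using the real base64 module, with b64encode chosen as an assumption:

import base64

def baseaa_encode(string: str) -> bytes:
    return base64.b64encode(string.encode('utf-8'))  # b85encode etc. fit the collapsed name equally

def baseaa_decode(encoded: bytes) -> str:
    return base64.b64decode(encoded).decode('utf-8')

assert baseaa_decode(baseaa_encode('Hello World!')) == 'Hello World!'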
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowercase__ = logging.getLogger(__name__)
class A_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str=None ) -> str:
super().__init__(
_UpperCamelCase , question_encoder_tokenizer=_UpperCamelCase , generator_tokenizer=_UpperCamelCase , index=_UpperCamelCase , init_retrieval=_UpperCamelCase , )
        self.process_group = None
def UpperCAmelCase_ ( self : List[str] , lowercase_ : int ) -> List[Any]:
logger.info('initializing retrieval' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized' )
# needs to be set manually
UpperCAmelCase : str = self._infer_socket_ifname()
# avoid clash with the NCCL port
UpperCAmelCase : Dict = str(distributed_port + 1 )
UpperCAmelCase : str = dist.new_group(ranks=_UpperCamelCase , backend='gloo' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main' )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
return dist.get_rank(group=self.process_group ) == 0
    def UpperCAmelCase_ ( self : Union[str, Any] , scatter_list : List[Any] , target_shape : Dict , target_type : Optional[Any]=torch.floataa ) -> Any:
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
        return target_tensor
def UpperCAmelCase_ ( self : str ) -> List[Any]:
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('e' )) , None )
        return ifname
    def UpperCAmelCase_ ( self : Any , question_hidden_states : np.ndarray , n_docs : int ) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            doc_ids , retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            doc_ids , retrieved_doc_embeds = torch.tensor(doc_ids ), torch.tensor(retrieved_doc_embeds )
            scatter_ids = self._chunk_tensor(doc_ids , n_queries )
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.intaa )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
| 715 |
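Single-process illustration of the gloo gather at the heart of the retriever above; world_size=1 makes it runnable standalone (the address and port are assumptions), while real use spans multiple workers each holding its own queries:

import torch
import torch.distributed as dist

dist.init_process_group('gloo', init_method='tcp://127.0.0.1:29500', rank=0, world_size=1)
queries = torch.randn(4, 8)
gathered = [torch.empty_like(queries)]  # the dst rank allocates one slot per rank
dist.gather(queries, gather_list=gathered, dst=0)
assert torch.equal(gathered[0], queries)
dist.destroy_process_group()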
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if dataset.ndim != value_array.ndim:
UpperCAmelCase : str = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(UpperCAmelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCAmelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(UpperCAmelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCAmelCase : List[str] = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(UpperCAmelCase_ )
UpperCAmelCase : str = []
for value in value_array:
UpperCAmelCase : Optional[Any] = euclidean(UpperCAmelCase_ , dataset[0] )
UpperCAmelCase : Tuple = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCAmelCase : Tuple = euclidean(UpperCAmelCase_ , UpperCAmelCase_ )
if dist > temp_dist:
UpperCAmelCase : List[str] = temp_dist
UpperCAmelCase : str = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return np.dot(UpperCAmelCase_ , UpperCAmelCase_ ) / (norm(UpperCAmelCase_ ) * norm(UpperCAmelCase_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
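Vectorised equivalents of the two metrics above:

import numpy as np

def euclidean(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.linalg.norm(a - b))

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

assert euclidean(np.array([0.0, 0.0]), np.array([3.0, 4.0])) == 5.0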
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
        act = get_activation('swish' )
self.assertIsInstance(_A , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        act = get_activation('silu' )
self.assertIsInstance(_A , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
        act = get_activation('mish' )
self.assertIsInstance(_A , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCAmelCase_ ( self : str ) -> List[Any]:
        act = get_activation('gelu' )
self.assertIsInstance(_A , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 716 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 0 |
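The registration round-trip the last two tests exercise, reduced to its essentials; CustomConfig and CustomFeatureExtractor stand in for user-defined classes:

from transformers import AutoConfig, AutoFeatureExtractor, FeatureExtractionMixin, PretrainedConfig

class CustomConfig(PretrainedConfig):
    model_type = 'custom'

class CustomFeatureExtractor(FeatureExtractionMixin):
    pass

AutoConfig.register('custom', CustomConfig)
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)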
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
lowercase__ = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman :
'''simple docstring'''
    def __init__( self , group : int = 14 ) -> None:
        if group not in primes:
            raise ValueError('Unsupported Group' )
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
def UpperCAmelCase_ ( self : str ) -> str:
return hex(self.__private_key )[2:]
    def generate_public_key( self ) -> str:
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key( self , key : int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key( self , other_key_str : str ) -> str:
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('Invalid public key' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
@staticmethod
def UpperCAmelCase_ ( lowercase_ : int , lowercase_ : int ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(__lowerCamelCase , (prime - 1) // 2 , __lowerCamelCase ) == 1
)
@staticmethod
def UpperCAmelCase_ ( lowercase_ : str , lowercase_ : str , lowercase_ : int = 14 ) -> str:
UpperCAmelCase : Any = int(__lowerCamelCase , base=16 )
UpperCAmelCase : Tuple = int(__lowerCamelCase , base=16 )
UpperCAmelCase : Optional[Any] = primes[group]["prime"]
if not DiffieHellman.is_valid_public_key_static(__lowerCamelCase , __lowerCamelCase ):
raise ValueError('Invalid public key' )
UpperCAmelCase : Dict = pow(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return shaaaa(str(__lowerCamelCase ).encode() ).hexdigest()
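# Usage sketch (illustrative, not part of the original module): two parties
# derive the same shared secret from each other's public keys.
#
#   alice, bob = DiffieHellman(), DiffieHellman()
#   shared_a = alice.generate_shared_key(bob.generate_public_key())
#   shared_b = bob.generate_shared_key(alice.generate_public_key())
#   assert shared_a == shared_b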
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    # downloadgram.net resolves an Instagram/IGTV page URL to a direct video URL
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 695 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100)
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4)
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512)
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
UpperCAmelCase : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Tuple = self.dummy_cond_unet_upscale
UpperCAmelCase : int = DDPMScheduler()
UpperCAmelCase : Optional[int] = DDIMScheduler(prediction_type='v_prediction' )
UpperCAmelCase : List[Any] = self.dummy_vae
UpperCAmelCase : Tuple = self.dummy_text_encoder
UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
# make sure here that pndm scheduler skips prk
UpperCAmelCase : int = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=350 , )
UpperCAmelCase : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase : Union[str, Any] = '''A painting of a squirrel eating a burger'''
UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase : List[Any] = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase : Union[str, Any] = output.images
UpperCAmelCase : List[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase : Tuple = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
UpperCAmelCase : str = image[0, -3:, -3:, -1]
UpperCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase : List[str] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase : List[Any] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_upscale_batch(self):
UpperCAmelCase : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet_upscale
UpperCAmelCase : Any = DDPMScheduler()
UpperCAmelCase : Optional[int] = DDIMScheduler(prediction_type='v_prediction' )
UpperCAmelCase : List[str] = self.dummy_vae
UpperCAmelCase : int = self.dummy_text_encoder
UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
# make sure here that pndm scheduler skips prk
UpperCAmelCase : int = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=350 , )
UpperCAmelCase : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase : int = '''A painting of a squirrel eating a burger'''
UpperCAmelCase : List[str] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase : Union[str, Any] = output.images
assert image.shape[0] == 2
UpperCAmelCase : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase : Optional[Any] = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
UpperCAmelCase : Optional[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_upscale_fp16(self):
UpperCAmelCase : List[str] = self.dummy_cond_unet_upscale
UpperCAmelCase : Dict = DDPMScheduler()
UpperCAmelCase : str = DDIMScheduler(prediction_type='v_prediction' )
UpperCAmelCase : int = self.dummy_vae
UpperCAmelCase : Optional[Any] = self.dummy_text_encoder
UpperCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase : List[str] = unet.half()
UpperCAmelCase : List[Any] = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase : int = StableDiffusionUpscalePipeline(
unet=lowerCamelCase_ , low_res_scheduler=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , max_noise_level=350 , )
UpperCAmelCase : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase : Any = '''A painting of a squirrel eating a burger'''
UpperCAmelCase : Any = torch.manual_seed(0 )
UpperCAmelCase : Dict = sd_pipe(
[prompt] , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='np' , ).images
UpperCAmelCase : Any = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
UpperCAmelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
UpperCAmelCase : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Dict = '''a cat sitting on a park bench'''
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : Any = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='np' , )
UpperCAmelCase : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
UpperCAmelCase : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
UpperCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16)
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Tuple = '''a cat sitting on a park bench'''
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , output_type='np' , )
UpperCAmelCase : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16)
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase : Optional[int] = '''a cat sitting on a park bench'''
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , output_type='np' , )
UpperCAmelCase : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
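        # Note: with attention slicing (chunk size 1) and sequential CPU offload
        # enabled, the assertion above pins peak GPU memory for the x4 upscaler
        # below roughly 2.9 GB.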
| 718 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """
    Returns the sum of the perimeters of every almost equilateral Heronian
    triangle (integral sides a, a, a +/- 1 with integral area) whose perimeter
    does not exceed ``max_perimeter``.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
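# Why the loop works (sketch): for sides (a, a, a +/- 1) the area is integral
# exactly when a Pell-type equation has an integer solution, and consecutive
# solutions obey the linear recurrence on `prev_value`/`value` above, so the
# qualifying perimeters can be generated directly instead of testing every a.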
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 0 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
'''simple docstring'''
def __init__( self : Dict , lowercase_ : List[str] , lowercase_ : Optional[int]=16 , lowercase_ : Union[str, Any]=13 , lowercase_ : Optional[Any]=7 , lowercase_ : Dict=14 , lowercase_ : Tuple=10 , lowercase_ : Any=19 , lowercase_ : int=5 , lowercase_ : str=4 , lowercase_ : Dict=True , lowercase_ : Optional[int]=16 , lowercase_ : str=2 , lowercase_ : List[str]=4 , lowercase_ : int=4 , lowercase_ : Tuple="gelu" , lowercase_ : Dict=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Union[str, Any]=[1, 2, 3, 4, 5] , lowercase_ : Tuple=25 , lowercase_ : List[Any]=5 , ) -> List[Any]:
UpperCAmelCase : Optional[int] = d_model
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : Optional[int] = prediction_length
UpperCAmelCase : Optional[Any] = context_length
UpperCAmelCase : int = cardinality
UpperCAmelCase : Optional[int] = num_time_features
UpperCAmelCase : int = lags_sequence
UpperCAmelCase : List[Any] = embedding_dimension
UpperCAmelCase : int = is_training
UpperCAmelCase : int = hidden_size
UpperCAmelCase : Optional[Any] = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : Optional[Any] = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = context_length
UpperCAmelCase : Optional[Any] = prediction_length + label_length
UpperCAmelCase : List[str] = label_length
UpperCAmelCase : Any = moving_average
UpperCAmelCase : str = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
UpperCAmelCase : Any = config.context_length + max(config.lags_sequence )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
UpperCAmelCase : Any = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
UpperCAmelCase : str = floats_tensor([self.batch_size, _past_length] )
UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
UpperCAmelCase : int = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, config.prediction_length] )
UpperCAmelCase : List[str] = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
UpperCAmelCase : Tuple = AutoformerModel(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
UpperCAmelCase : Any = model(**__UpperCamelCase )
UpperCAmelCase : List[Any] = outputs.encoder_last_hidden_state
UpperCAmelCase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Optional[int] = model.get_encoder()
encoder.save_pretrained(__UpperCamelCase )
UpperCAmelCase : Optional[Any] = AutoformerEncoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = model.create_network_inputs(**__UpperCamelCase )
UpperCAmelCase , UpperCAmelCase : List[Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
UpperCAmelCase : Union[str, Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
UpperCAmelCase : Dict = encoder(inputs_embeds=__UpperCamelCase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
UpperCAmelCase : Optional[Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
UpperCAmelCase : Tuple = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
UpperCAmelCase : int = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
UpperCAmelCase : Optional[int] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : List[Any] = model.get_decoder()
decoder.save_pretrained(__UpperCamelCase )
UpperCAmelCase : Dict = AutoformerDecoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )
UpperCAmelCase : Optional[Any] = decoder(
trend=__UpperCamelCase , inputs_embeds=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
UpperCAmelCase_ : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
UpperCAmelCase_ : Any = (AutoformerForPrediction,) if is_torch_available() else ()
UpperCAmelCase_ : Optional[Any] = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : Optional[int] = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Optional[int] = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
UpperCAmelCase , UpperCAmelCase : Dict = model_class.from_pretrained(__UpperCamelCase , output_loading_info=__UpperCamelCase )
self.assertEqual(info['missing_keys'] , [] )
    def test_encoder_decoder_model_standalone(self):
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__UpperCamelCase )
    @unittest.skip(reason='Model has no tokens embeddings')
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
UpperCAmelCase : List[Any] = inspect.signature(getattr(__UpperCamelCase , 'forward' ) )
# The main input is the name of the argument after `self`
UpperCAmelCase : str = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __UpperCamelCase )
    def test_forward_signature(self):
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[str] = model_class(__UpperCamelCase )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase : Optional[int] = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(__UpperCamelCase )] , __UpperCamelCase )
    def test_attention_outputs(self):
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : str = True
UpperCAmelCase : Optional[Any] = getattr(self.model_tester , 'seq_length' , __UpperCamelCase )
UpperCAmelCase : Optional[Any] = getattr(self.model_tester , 'decoder_seq_length' , __UpperCamelCase )
UpperCAmelCase : Tuple = getattr(self.model_tester , 'encoder_seq_length' , __UpperCamelCase )
UpperCAmelCase : Optional[Any] = getattr(self.model_tester , 'd_model' , __UpperCamelCase )
UpperCAmelCase : Tuple = getattr(self.model_tester , 'num_attention_heads' , __UpperCamelCase )
UpperCAmelCase : Union[str, Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = True
UpperCAmelCase : List[Any] = False
UpperCAmelCase : str = True
UpperCAmelCase : Optional[int] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Optional[int] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCAmelCase : Optional[Any] = outputs.encoder_attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCAmelCase : List[str] = len(__UpperCamelCase )
UpperCAmelCase : List[str] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
# decoder attentions
UpperCAmelCase : Tuple = outputs.decoder_attentions
self.assertIsInstance(__UpperCamelCase , (list, tuple) )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCAmelCase : List[str] = outputs.cross_attentions
self.assertIsInstance(__UpperCamelCase , (list, tuple) )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[str] = True
UpperCAmelCase : Optional[int] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase : str = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + 2 , len(__UpperCamelCase ) )
UpperCAmelCase : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 719 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'))
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=3)
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000)
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='numpy').images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type='numpy', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 695 | 0 |
'''simple docstring'''
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    # map each value to the list of positions where it occurs
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                # mirrored occurrences of a value must sum to the last index
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
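# Usage sketch (illustrative; the module itself does not define a node type --
# any object with `val` and `next` attributes works):
#
#   class Node:
#       def __init__(self, val, nxt=None):
#           self.val, self.next = val, nxt
#
#   head = Node(1, Node(2, Node(1)))
#   assert is_palindrome_stack(head) is True
#   assert is_palindrome(head) is True  # note: this variant rewires the list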
| 720 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'))
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='numpy', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 695 | 0 |
'''simple docstring'''
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Returns True if the sink `t` is reachable from the source `s`
    # through edges with remaining capacity.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Ford-Fulkerson with BFS (Edmonds-Karp); returns the edges of a minimum cut."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Edges whose capacity is now exhausted form the cut.
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
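# The matrix above is the classic CLRS example network; by the max-flow/min-cut
# theorem, the saturated edges collected in `res` form a minimum s-t cut.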
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 721 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    """Configuration class for the Autoformer time-series forecasting model."""

    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
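# Usage sketch (illustrative):
#
#   config = AutoformerConfig(prediction_length=24)
#   config.num_attention_heads  # -> 2, resolved via attribute_map to
#                               #    `encoder_attention_heads`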
| 695 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
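    # `_LazyModule` defers the heavy torch/TF/Flax imports declared above until
    # an attribute is first accessed, keeping `import transformers` cheap.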
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
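# Example (character bigrams):
#   create_ngram("I am a sentence", 2)
#   -> ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']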
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = MBartConfig
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_ : List[str] = 'gelu'
def __init__( self : Optional[int] , lowercase_ : Any , lowercase_ : Union[str, Any]=13 , lowercase_ : Optional[int]=7 , lowercase_ : List[Any]=True , lowercase_ : List[str]=False , lowercase_ : str=99 , lowercase_ : Dict=32 , lowercase_ : List[str]=2 , lowercase_ : str=4 , lowercase_ : str=37 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=20 , lowercase_ : Any=2 , lowercase_ : Optional[Any]=1 , lowercase_ : List[Any]=0 , ) -> Optional[int]:
UpperCAmelCase : Dict = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : Optional[int] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Optional[Any] = use_labels
UpperCAmelCase : List[str] = vocab_size
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : Optional[Any] = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : Dict = intermediate_size
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : Union[str, Any] = max_position_embeddings
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : List[Any] = pad_token_id
UpperCAmelCase : List[str] = bos_token_id
    def prepare_config_and_inputs_for_common(self):
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Any = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase : Union[str, Any] = prepare_mbart_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
UpperCAmelCase : str = TFMBartModel(config=lowercase_ ).get_decoder()
UpperCAmelCase : Optional[Any] = inputs_dict['input_ids']
UpperCAmelCase : Dict = input_ids[:1, :]
UpperCAmelCase : Optional[Any] = inputs_dict['attention_mask'][:1, :]
UpperCAmelCase : Union[str, Any] = inputs_dict['head_mask']
UpperCAmelCase : Tuple = 1
# first forward pass
UpperCAmelCase : List[Any] = model(lowercase_ , attention_mask=lowercase_ , head_mask=lowercase_ , use_cache=lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[Any] = outputs.to_tuple()
UpperCAmelCase : Union[str, Any] = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
UpperCAmelCase_ : int = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
UpperCAmelCase_ : Any = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase_ : Any = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : Dict = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False
    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 701 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : int = sin(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = cos(UpperCAmelCase_ )
UpperCAmelCase : Dict = _sin / (2 * q_factor)
UpperCAmelCase : Tuple = (1 - _cos) / 2
UpperCAmelCase : Dict = 1 - _cos
UpperCAmelCase : Union[str, Any] = 1 + alpha
UpperCAmelCase : List[str] = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha
UpperCAmelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
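# Coefficient convention (a hedged reading of the calls in this module): the factories
# implement the second-order biquad from R. Bristow-Johnson's "Audio EQ Cookbook",
#     H(z) = (b0 + b1*z^-1 + b2*z^-2) / (a0 + a1*z^-1 + a2*z^-2),
# with w0 = tau * frequency / samplerate and alpha = sin(w0) / (2 * q_factor), and
# set_coefficients appears to take the denominator (a) list first, then the numerator (b) list.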
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[str] = cos(UpperCAmelCase_ )
UpperCAmelCase : Tuple = _sin / (2 * q_factor)
UpperCAmelCase : int = (1 + _cos) / 2
UpperCAmelCase : List[Any] = -1 - _cos
UpperCAmelCase : Optional[int] = 1 + alpha
UpperCAmelCase : Any = -2 * _cos
UpperCAmelCase : str = 1 - alpha
UpperCAmelCase : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : List[Any] = tau * frequency / samplerate
UpperCAmelCase : Union[str, Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : Any = _sin / (2 * q_factor)
UpperCAmelCase : Optional[Any] = _sin / 2
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : int = -ba
UpperCAmelCase : List[str] = 1 + alpha
UpperCAmelCase : int = -2 * _cos
UpperCAmelCase : Optional[int] = 1 - alpha
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : Tuple = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : int = 1 - alpha
UpperCAmelCase : Dict = -2 * _cos
UpperCAmelCase : Any = 1 + alpha
UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Optional[int] = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
UpperCAmelCase : Union[str, Any] = 10 ** (gain_db / 40)
UpperCAmelCase : int = 1 + alpha * big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha * big_a
UpperCAmelCase : Tuple = 1 + alpha / big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : int = 1 - alpha / big_a
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Dict = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : Any = cos(UpperCAmelCase_ )
UpperCAmelCase : str = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : List[str] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : List[Any] = big_a * (pmc + aaa)
UpperCAmelCase : Optional[int] = 2 * big_a * mpc
UpperCAmelCase : Optional[int] = big_a * (pmc - aaa)
UpperCAmelCase : str = ppmc + aaa
UpperCAmelCase : int = -2 * pmpc
UpperCAmelCase : int = ppmc - aaa
UpperCAmelCase : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : List[Any] = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : Optional[Any] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : Dict = big_a * (ppmc + aaa)
UpperCAmelCase : List[str] = -2 * big_a * pmpc
UpperCAmelCase : int = big_a * (ppmc - aaa)
UpperCAmelCase : Dict = pmc + aaa
UpperCAmelCase : Optional[int] = 2 * mpc
UpperCAmelCase : int = pmc - aaa
UpperCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
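# A hedged usage sketch. Assumptions: the first factory above is exported as
# make_lowpass (its upstream name) and IIRFilter exposes a per-sample process()
# method, as in the accompanying audio_filters.iir_filter module. Runs a unit
# impulse through a 1 kHz low-pass biquad at 48 kHz and prints the start of the
# impulse response.
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)
    impulse = [1.0] + [0.0] * 9
    print([round(lowpass.process(sample), 6) for sample in impulse])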
| 695 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : List[Any] = int(np.ceil((x_end - xa) / step_size ) )
UpperCAmelCase : Optional[int] = np.zeros((n + 1,) )
UpperCAmelCase : Union[str, Any] = ya
UpperCAmelCase : Optional[int] = xa
for k in range(UpperCAmelCase_ ):
UpperCAmelCase : Dict = y[k] + step_size * ode_func(UpperCAmelCase_ , y[k] )
UpperCAmelCase : List[str] = y[k] + (
(step_size / 2) * (ode_func(UpperCAmelCase_ , y[k] ) + ode_func(x + step_size , UpperCAmelCase_ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
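# Hedged worked example (assuming the integrator above is exported under its upstream
# name, euler_modified): for y' = y with y(0) = 1 and step 0.1 on [0, 1], the
# predictor-corrector multiplies y by 1 + h + h**2/2 = 1.105 per step, so
#     euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1]
# is about 2.7141 against the exact e = 2.71828... - the expected second-order accuracy.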
| 702 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
class A_ ( Generic[T] ):
'''simple docstring'''
UpperCAmelCase_ : deque[T] # Cache store of keys
UpperCAmelCase_ : set[T] # References of the keys in cache
UpperCAmelCase_ : int = 10 # Maximum capacity of cache
def __init__( self : List[Any] , lowercase_ : int ) -> None:
UpperCAmelCase : Any = deque()
UpperCAmelCase : Dict = set()
if not n:
UpperCAmelCase : Optional[int] = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
UpperCAmelCase : str = n
def UpperCAmelCase_ ( self : List[str] , lowercase_ : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCAmelCase : Optional[Any] = self.dq_store.pop()
self.key_reference.remove(lowercase_ )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> None:
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Union[str, Any] ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
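# Design note: refer() above costs O(n) on a cache hit because deque.remove() scans
# the store. A sketch of an O(1) variant using OrderedDict (an illustrative
# alternative, not part of the original module):
from collections import OrderedDict

class OrderedDictLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)  # O(1) re-touch instead of deque.remove()'s O(n)
            return
        if len(self.store) >= self.capacity:
            self.store.popitem(last=False)  # evict the least recently used key
        self.store[key] = None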
| 695 | 0 |
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class A_ :
'''simple docstring'''
def __init__( self : str ) -> List[str]:
UpperCAmelCase : List[str] = psutil.Process()
UpperCAmelCase : int = False
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
UpperCAmelCase : List[str] = -1
while True:
UpperCAmelCase : List[Any] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
UpperCAmelCase : List[str] = True
UpperCAmelCase : int = threading.Thread(target=self.peak_monitor )
UpperCAmelCase : Dict = True
self.thread.start()
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
UpperCAmelCase : str = False
self.thread.join()
return self.cpu_memory_peak
lowercase__ = PeakCPUMemory()
def UpperCamelCase( ):
# Time
UpperCAmelCase : int = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase : List[str] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase : Dict = torch.cuda.memory_allocated(snake_case_ )
torch.cuda.reset_peak_memory_stats()
return measures
def UpperCamelCase( UpperCAmelCase_ ):
# Time
UpperCAmelCase : Tuple = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
UpperCAmelCase : int = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
UpperCAmelCase : Dict = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
UpperCAmelCase : Dict = (torch.cuda.memory_allocated(snake_case_ ) - start_measures[str(snake_case_ )]) / 2**20
UpperCAmelCase : Optional[Any] = (torch.cuda.max_memory_allocated(snake_case_ ) - start_measures[str(snake_case_ )]) / 2**20
return measures
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
print(F"""{description}:""" )
print(F"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(snake_case_ )]:.2f}MiB""" )
UpperCAmelCase : Tuple = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
| 703 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<|endoftext|>" , lowercase_ : List[str]="<|endoftext|>" , lowercase_ : Any="<|endoftext|>" , lowercase_ : List[str]=False , **lowercase_ : Union[str, Any] , ) -> str:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(lowercase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Tuple = pre_tok_class(**lowercase_ )
UpperCAmelCase : Any = add_prefix_space
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
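# Minimal usage sketch (the class above is registered upstream as GPTNeoXTokenizerFast;
# loading the real vocabulary requires network access to the Hub):
#
#     from transformers import AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
#     print(tok("Hello world").input_ids)
#
# Passing add_prefix_space=True at load time makes the first word of a sequence
# tokenize as if it were preceded by a space, which is exactly what the
# pre-tokenizer patching in __init__ above controls.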
| 695 | 0 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase__ = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowercase__ = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowercase__ = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self : List[Any] ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def UpperCAmelCase_ ( self : str , lowercase_ : List[List[List[str]]] , lowercase_ : List[List[str]] , lowercase_ : int = 1 , lowercase_ : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowercase_ , hypotheses=lowercase_ , min_len=lowercase_ , max_len=lowercase_ )
}
| 704 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """openai/whisper-base"""
UpperCAmelCase_ : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCAmelCase_ : Dict = """transcriber"""
UpperCAmelCase_ : int = WhisperProcessor
UpperCAmelCase_ : Optional[int] = WhisperForConditionalGeneration
UpperCAmelCase_ : Dict = ["""audio"""]
UpperCAmelCase_ : Optional[int] = ["""text"""]
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[int]:
return self.pre_processor(lowercase_ , return_tensors='pt' ).input_features
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> List[str]:
return self.model.generate(inputs=lowercase_ )
def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] ) -> List[str]:
return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )[0]
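# A hedged end-to-end sketch of the encode -> forward -> decode contract above.
# Assumptions: the class is exported upstream as SpeechToTextTool, and `audio` is a
# 16 kHz mono float array (what the Whisper feature extractor expects):
#
#     tool = SpeechToTextTool()
#     text = tool(audio)  # PipelineTool.__call__ chains encode(), forward() and decode()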
| 695 | 0 |
'''simple docstring'''
import numpy as np
def UpperCamelCase( UpperCAmelCase_ ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
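# The expression above is the tangent hyperbolic written via a single exp call:
#     tanh(x) = (e^x - e^-x) / (e^x + e^-x) = 2 / (1 + e^(-2x)) - 1,
# so, up to floating-point behaviour, it matches numpy's built-in:
#     v = np.linspace(-3, 3, 7); np.allclose(np.tanh(v), 2 / (1 + np.exp(-2 * v)) - 1)  # True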
| 705 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
lowercase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = """whisper"""
UpperCAmelCase_ : Tuple = ["""past_key_values"""]
UpperCAmelCase_ : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str , lowercase_ : Any=51_865 , lowercase_ : List[Any]=80 , lowercase_ : int=6 , lowercase_ : Dict=4 , lowercase_ : List[Any]=6 , lowercase_ : Any=4 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=1_536 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : List[Any]=50_257 , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : str="gelu" , lowercase_ : List[str]=256 , lowercase_ : str=0.0 , lowercase_ : Any=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=1_500 , lowercase_ : List[Any]=448 , lowercase_ : int=50_256 , lowercase_ : Union[str, Any]=50_256 , lowercase_ : List[Any]=50_256 , lowercase_ : Tuple=None , lowercase_ : Optional[Any]=[220, 50_256] , lowercase_ : Tuple=False , lowercase_ : str=256 , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=0.05 , lowercase_ : Any=10 , lowercase_ : Optional[Any]=2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=10 , lowercase_ : int=0 , lowercase_ : Optional[int]=7 , **lowercase_ : Union[str, Any] , ) -> List[str]:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Any = num_mel_bins
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : str = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Tuple = decoder_ffn_dim
UpperCAmelCase : List[str] = encoder_ffn_dim
UpperCAmelCase : int = dropout
UpperCAmelCase : int = attention_dropout
UpperCAmelCase : List[Any] = activation_dropout
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : Dict = encoder_layerdrop
UpperCAmelCase : str = decoder_layerdrop
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : int = encoder_layers
UpperCAmelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase : Tuple = max_source_positions
UpperCAmelCase : List[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase : Optional[int] = classifier_proj_size
UpperCAmelCase : List[Any] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : Optional[Any] = apply_spec_augment
UpperCAmelCase : Optional[Any] = mask_time_prob
UpperCAmelCase : Optional[Any] = mask_time_length
UpperCAmelCase : str = mask_time_min_masks
UpperCAmelCase : List[str] = mask_feature_prob
UpperCAmelCase : Tuple = mask_feature_length
UpperCAmelCase : Optional[int] = mask_feature_min_masks
UpperCAmelCase : str = median_filter_width
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , suppress_tokens=lowercase_ , begin_suppress_tokens=lowercase_ , **lowercase_ , )
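# Example: the defaults above reproduce the "base" checkpoint's architecture. A tiny
# configuration for unit tests can be built by shrinking the same fields (the class is
# registered upstream as WhisperConfig; the values below are illustrative only):
#
#     tiny = WhisperConfig(d_model=64, encoder_layers=2, decoder_layers=2,
#                          encoder_attention_heads=2, decoder_attention_heads=2,
#                          encoder_ffn_dim=128, decoder_ffn_dim=128)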
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
UpperCAmelCase : Optional[int] = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
UpperCAmelCase : int = {0: 'batch'}
else:
UpperCAmelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='inputs' )
return common_inputs
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional["TensorType"] = None , lowercase_ : int = 22_050 , lowercase_ : float = 5.0 , lowercase_ : int = 220 , ) -> Mapping[str, Any]:
UpperCAmelCase : Tuple = OrderedDict()
UpperCAmelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=lowercase_ , framework=lowercase_ , sampling_rate=lowercase_ , time_duration=lowercase_ , frequency=lowercase_ , )
UpperCAmelCase : Optional[Any] = encoder_inputs['input_features'].shape[2]
UpperCAmelCase : Tuple = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCAmelCase : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Dict = encoder_inputs.pop('input_features' )
UpperCAmelCase : List[str] = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def UpperCAmelCase_ ( self : Dict ) -> float:
return 1E-3
| 695 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
    '''simple docstring'''
    def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Tuple ) -> None:
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.' , FutureWarning , )
        super().__init__(*lowercase_ , **lowercase_ )
| 706 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
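# For a single-GPU machine with mixed_precision="fp16", the file written above would
# look roughly like this (illustrative, not an exhaustive field list):
#
#     {
#       "compute_environment": "LOCAL_MACHINE",
#       "mixed_precision": "fp16",
#       "distributed_type": "NO",
#       "num_processes": 1,
#       "use_cpu": false
#     }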
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
parser.add_argument(
'--config_file' , default=UpperCAmelCase_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=UpperCAmelCase_ )
return parser
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 695 | 0 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = []
for part_id in partition_order:
UpperCAmelCase : int = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(__snake_case ):
expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase( ):
UpperCAmelCase : List[str] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCAmelCase : List[str] = spark.range(1_00 ).repartition(1 )
UpperCAmelCase : List[Any] = Spark(__snake_case )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase( ):
UpperCAmelCase : int = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCAmelCase : Optional[int] = spark.range(10 ).repartition(2 )
UpperCAmelCase : List[Any] = [1, 0]
UpperCAmelCase : Any = _generate_iterable_examples(__snake_case , __snake_case ) # Reverse the partitions.
UpperCAmelCase : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case , __snake_case )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
UpperCAmelCase , UpperCAmelCase : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase( ):
UpperCAmelCase : Union[str, Any] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCAmelCase : Dict = spark.range(10 ).repartition(1 )
UpperCAmelCase : Optional[Any] = SparkExamplesIterable(__snake_case )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__snake_case ):
assert row_id == F"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase( ):
UpperCAmelCase : List[str] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCAmelCase : Union[str, Any] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
UpperCAmelCase : Optional[int] = lambda UpperCAmelCase_ : x.reverse()
UpperCAmelCase : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case , [2, 1, 0] )
UpperCAmelCase : Tuple = SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__snake_case ):
UpperCAmelCase , UpperCAmelCase : int = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase( ):
UpperCAmelCase : Dict = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCAmelCase : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
UpperCAmelCase : List[str] = SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCAmelCase : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case , [0, 2] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCAmelCase : Optional[int] = SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCAmelCase : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case , [1, 3] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCamelCase( ):
UpperCAmelCase : int = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
UpperCAmelCase : Optional[Any] = spark.range(1_00 ).repartition(1 )
UpperCAmelCase : List[str] = Spark(__snake_case )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
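# The builder exercised above backs the public `datasets.Dataset.from_spark` entry
# point (available in recent versions of the datasets library). A hedged end-user
# sketch, assuming a live local Spark session:
#
#     from datasets import Dataset
#     df = spark.range(100)
#     ds = Dataset.from_spark(df)  # materialises the dataframe through the Spark builder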
| 707 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> None:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 695 | 0 |
'''simple docstring'''
import os
from math import logaa
def UpperCamelCase( UpperCAmelCase_ = "base_exp.txt" ):
UpperCAmelCase : float = 0
UpperCAmelCase : Any = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(UpperCAmelCase__ ) , UpperCAmelCase__ ) ) ):
UpperCAmelCase : Optional[int] = list(map(UpperCAmelCase__ , line.split(',' ) ) )
if x * logaa(UpperCAmelCase__ ) > largest:
UpperCAmelCase : Tuple = x * logaa(UpperCAmelCase__ )
UpperCAmelCase : int = i + 1
return result
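# Why logarithms: log10 is strictly increasing, so comparing x * log10(a) orders the
# values a**x exactly while only ever handling small floats. The classic pair from the
# problem statement, 632382**518061 versus 519432**525806, would each have roughly
# three million digits if expanded, yet the log comparison settles it instantly
# (the first is larger).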
if __name__ == "__main__":
print(solution())
| 708 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowercase__ = logging.getLogger(__name__)
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : str
UpperCAmelCase_ : str
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
UpperCAmelCase_ : Optional[str] = None
@dataclass(frozen=_snake_case )
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[int]
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[List[int]] = None
UpperCAmelCase_ : Optional[Union[int, float]] = None
UpperCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : List[str] , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = None , lowercase_ : List[str]=False , lowercase_ : bool = False , ) -> Optional[Any]:
UpperCAmelCase : Dict = hans_processors[task]()
UpperCAmelCase : List[Any] = os.path.join(
lowercase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(lowercase_ ) , lowercase_ , ) , )
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Optional[int] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase : int = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase : Tuple = torch.load(lowercase_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase : int = (
processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
)
logger.info('Training examples: %s' , len(lowercase_ ) )
UpperCAmelCase : Dict = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
logger.info('Saving features into cached file %s' , lowercase_ )
torch.save(self.features , lowercase_ )
def __len__( self : Union[str, Any] ) -> str:
return len(self.features )
def __getitem__( self : Dict , lowercase_ : Dict ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class A_ :
'''simple docstring'''
UpperCAmelCase_ : List[InputFeatures]
def __init__( self : Tuple , lowercase_ : str , lowercase_ : PreTrainedTokenizer , lowercase_ : str , lowercase_ : Optional[int] = 128 , lowercase_ : int=False , lowercase_ : bool = False , ) -> Union[str, Any]:
UpperCAmelCase : int = hans_processors[task]()
UpperCAmelCase : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase , UpperCAmelCase : Tuple = label_list[2], label_list[1]
UpperCAmelCase : Any = label_list
UpperCAmelCase : str = processor.get_dev_examples(lowercase_ ) if evaluate else processor.get_train_examples(lowercase_ )
UpperCAmelCase : int = hans_convert_examples_to_features(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(lowercase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase : Optional[Any] = tf.data.Dataset.from_generator(
lowercase_ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
return self.dataset
def __len__( self : Tuple ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : List[Any] , lowercase_ : Union[str, Any] ) -> InputFeatures:
return self.features[i]
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return self.label_list
class A_ ( _snake_case ):
'''simple docstring'''
def UpperCAmelCase_ ( self : int , lowercase_ : Optional[int] ) -> Any:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(lowercase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
return ["contradiction", "entailment", "neutral"]
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : str ) -> Dict:
UpperCAmelCase : Union[str, Any] = []
for i, line in enumerate(lowercase_ ):
if i == 0:
continue
UpperCAmelCase : Tuple = '%s-%s' % (set_type, line[0])
UpperCAmelCase : Tuple = line[5]
UpperCAmelCase : Dict = line[6]
UpperCAmelCase : Optional[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCAmelCase : Optional[Any] = line[0]
examples.append(InputExample(guid=lowercase_ , text_a=lowercase_ , text_b=lowercase_ , label=lowercase_ , pairID=lowercase_ ) )
return examples
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
UpperCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase_ )}
UpperCAmelCase : Optional[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCAmelCase_ ) , desc='convert examples to features' ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d' % (ex_index) )
UpperCAmelCase : int = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' , truncation=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , )
UpperCAmelCase : List[str] = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase : Any = int(example.pairID )
features.append(InputFeatures(**UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
lowercase__ = {
"hans": 3,
}
lowercase__ = {
"hans": HansProcessor,
}
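# Hedged usage sketch (upstream names assumed: the torch Dataset defined above is
# HansDataset, and data_dir must contain heuristics_train_set.txt /
# heuristics_evaluation_set.txt from the HANS release):
#
#     from transformers import AutoTokenizer
#     tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#     ds = HansDataset(data_dir="./hans", tokenizer=tok, task="hans", max_seq_length=128)
#     print(len(ds), ds.get_labels())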
| 695 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
UpperCAmelCase , UpperCAmelCase : Any = 1, 1
UpperCAmelCase : Any = []
for i in range(1 , n + 1 ):
UpperCAmelCase : Tuple = prev_numerator + 2 * prev_denominator
UpperCAmelCase : Any = prev_numerator + prev_denominator
if len(str(UpperCAmelCase_ ) ) > len(str(UpperCAmelCase_ ) ):
result.append(UpperCAmelCase_ )
UpperCAmelCase : Dict = numerator
UpperCAmelCase : Dict = denominator
return len(UpperCAmelCase_ )
if __name__ == "__main__":
print(f'''{solution() = }''')
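# The recurrence above walks the continued-fraction convergents of sqrt(2):
# numerator' = numerator + 2*denominator, denominator' = numerator + denominator,
# starting from 1/1 and giving 3/2, 7/5, 17/12, 41/29, ... A quick self-contained
# check that the eighth expansion, 1393/985, is the first whose numerator has more
# digits than its denominator:
n, d = 1, 1
for _ in range(8):
    n, d = n + 2 * d, n + d
assert (n, d) == (1393, 985) and len(str(n)) > len(str(d))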
| 695 | 0 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
lowercase__ : int = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowercase__ : Optional[int] = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
lowercase__ : str = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def UpperCAmelCase_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any]=None , lowercase_ : str="uniform_average" , lowercase_ : List[Any]=True ) -> Any:
UpperCAmelCase : Union[str, Any] = mean_squared_error(
__lowerCAmelCase , __lowerCAmelCase , sample_weight=__lowerCAmelCase , multioutput=__lowerCAmelCase , squared=__lowerCAmelCase )
return {"mse": mse}
| 710 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10_00 ):
UpperCAmelCase : List[Any] = 2**power
UpperCAmelCase : List[Any] = 0
while n:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
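# Equivalent one-liner, shown for contrast with the arithmetic loop above - Python's
# arbitrary-precision integers make the string route just as exact:
#     sum(int(digit) for digit in str(2**1000))  # -> 1366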
| 695 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowercase__ = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
lowercase__ = {"facebook/blenderbot_small-90M": 512}
def UpperCamelCase( UpperCAmelCase_ ) -> List[str]:
UpperCAmelCase : Any = set()
UpperCAmelCase : List[str] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase : int = char
UpperCAmelCase : Any = set(_snake_case )
return pairs
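# Example: for the symbol tuple ('h', 'e', 'l', 'l', 'o</w>') the helper above returns
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')} - the candidate bigrams that the
# BPE loop in the tokenizer below ranks against self.bpe_ranks when choosing merges.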
class A_ ( __UpperCAmelCase ):
'''simple docstring'''
UpperCAmelCase_ : int = VOCAB_FILES_NAMES
UpperCAmelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : int , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int]="__start__" , lowercase_ : List[str]="__end__" , lowercase_ : List[Any]="__unk__" , lowercase_ : Optional[Any]="__null__" , **lowercase_ : List[Any] , ) -> str:
super().__init__(unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , **UpperCAmelCase_ )
with open(UpperCAmelCase_ , encoding='utf-8' ) as vocab_handle:
UpperCAmelCase : str = json.load(UpperCAmelCase_ )
UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8' ) as merges_handle:
UpperCAmelCase : Optional[int] = merges_handle.read().split('\n' )[1:-1]
UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in merges]
UpperCAmelCase : int = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
UpperCAmelCase : List[str] = {}
@property
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
return len(self.encoder )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : List[str] ) -> List[Any]:
if token in self.cache:
return self.cache[token]
UpperCAmelCase : Dict = re.sub('([.,!?()])' , R' \1' , UpperCAmelCase_ )
UpperCAmelCase : Dict = re.sub('(\')' , R' \1 ' , UpperCAmelCase_ )
UpperCAmelCase : str = re.sub(R'\s{2,}' , ' ' , UpperCAmelCase_ )
if "\n" in token:
UpperCAmelCase : Tuple = token.replace('\n' , ' __newln__' )
UpperCAmelCase : int = token.split(' ' )
UpperCAmelCase : Optional[int] = []
for token in tokens:
if not len(UpperCAmelCase_ ):
continue
UpperCAmelCase : int = token.lower()
UpperCAmelCase : List[Any] = tuple(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
UpperCAmelCase : Union[str, Any] = get_pairs(UpperCAmelCase_ )
if not pairs:
words.append(UpperCAmelCase_ )
continue
while True:
UpperCAmelCase : List[str] = min(UpperCAmelCase_ , key=lambda lowercase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase , UpperCAmelCase : Optional[int] = bigram
UpperCAmelCase : Tuple = []
UpperCAmelCase : Optional[int] = 0
while i < len(UpperCAmelCase_ ):
try:
UpperCAmelCase : Optional[Any] = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
new_word.extend(word[i:j] )
UpperCAmelCase : List[Any] = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase : Dict = tuple(UpperCAmelCase_ )
UpperCAmelCase : Dict = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
UpperCAmelCase : int = get_pairs(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = '@@ '.join(UpperCAmelCase_ )
UpperCAmelCase : str = word[:-4]
UpperCAmelCase : Optional[Any] = word
words.append(UpperCAmelCase_ )
return " ".join(UpperCAmelCase_ )
def UpperCAmelCase_ ( self : int , lowercase_ : List[Any] ) -> Tuple:
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : List[Any] = re.findall(R'\S+\n?' , UpperCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(' ' ) ) )
return split_tokens
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : List[str] ) -> Optional[int]:
UpperCAmelCase : str = token.lower()
return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : Dict , lowercase_ : Any ) -> List[str]:
return self.decoder.get(lowercase_ , self.unk_token )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Dict ) -> Tuple:
UpperCAmelCase : List[Any] = ' '.join(lowercase_ ).replace('@@ ' , '' ).strip()
return out_string
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Optional[Any] = None ) -> Optional[Any]:
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase : Optional[int] = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase : Any = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowercase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + '\n' )
UpperCAmelCase : Union[str, Any] = 0
with open(lowercase_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowercase_ : lowercase_[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
UpperCAmelCase : Tuple = token_index
writer.write(' '.join(lowercase_ ) + '\n' )
index += 1
return vocab_file, merge_file
| 711 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : str = """blip_2_vision_model"""
def __init__( self : List[str] , lowercase_ : int=1_408 , lowercase_ : Tuple=6_144 , lowercase_ : Dict=39 , lowercase_ : Optional[int]=16 , lowercase_ : str=224 , lowercase_ : Any=14 , lowercase_ : int="gelu" , lowercase_ : int=0.0_0001 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=1E-10 , lowercase_ : List[str]=True , **lowercase_ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : str = patch_size
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : str = qkv_bias
@classmethod
def UpperCAmelCase_ ( cls : List[str] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip_2_qformer"""
def __init__( self : Tuple , lowercase_ : Union[str, Any]=30_522 , lowercase_ : int=768 , lowercase_ : Dict=12 , lowercase_ : Dict=12 , lowercase_ : int=3_072 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=512 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-12 , lowercase_ : str=0 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Optional[int]=2 , lowercase_ : str=1_408 , **lowercase_ : Optional[Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase : int = vocab_size
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : Dict = position_embedding_type
UpperCAmelCase : Any = cross_attention_frequency
UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def UpperCAmelCase_ ( cls : List[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
UpperCAmelCase : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """blip-2"""
UpperCAmelCase_ : Any = True
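# Composite configuration: builds the vision, Q-Former and language-model sub-configs and
# mirrors a few of their fields (tie_word_embeddings, is_encoder_decoder, the vision hidden
# size used for the Q-Former's encoder input) at the top level.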
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Dict=None , lowercase_ : Dict=32 , **lowercase_ : Union[str, Any] ) -> Any:
super().__init__(**lowercase_ )
if vision_config is None:
UpperCAmelCase : Union[str, Any] = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
UpperCAmelCase : Dict = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
UpperCAmelCase : str = BlipaVisionConfig(**lowercase_ )
UpperCAmelCase : str = BlipaQFormerConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
UpperCAmelCase : int = CONFIG_MAPPING[text_model_type](**lowercase_ )
UpperCAmelCase : Optional[int] = self.text_config.tie_word_embeddings
UpperCAmelCase : Dict = self.text_config.is_encoder_decoder
UpperCAmelCase : Tuple = num_query_tokens
UpperCAmelCase : Tuple = self.vision_config.hidden_size
UpperCAmelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Union[str, Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.02
@classmethod
def UpperCAmelCase_ ( cls : Dict , lowercase_ : BlipaVisionConfig , lowercase_ : BlipaQFormerConfig , lowercase_ : PretrainedConfig , **lowercase_ : List[Any] , ) -> Tuple:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : List[str] = self.text_config.to_dict()
UpperCAmelCase : str = self.__class__.model_type
return output
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class A_ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : int ) -> List[str]:
UpperCAmelCase : Dict = num_of_nodes
UpperCAmelCase : list[list[int]] = []
UpperCAmelCase : dict[int, int] = {}
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int ) -> Any:
self.m_edges.append([u_node, v_node, weight] )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> Tuple:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> Dict:
if self.m_component[u_node] != u_node:
for k in self.m_component:
UpperCAmelCase : Optional[int] = self.find_component(lowercase_ )
def UpperCAmelCase_ ( self : str , lowercase_ : list[int] , lowercase_ : int , lowercase_ : int ) -> Tuple:
if component_size[u_node] <= component_size[v_node]:
UpperCAmelCase : int = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowercase_ )
elif component_size[u_node] >= component_size[v_node]:
UpperCAmelCase : Optional[int] = self.find_component(lowercase_ )
component_size[u_node] += component_size[v_node]
self.set_component(lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : int = 0
UpperCAmelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
UpperCAmelCase : List[str] = self.m_num_of_nodes
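# Boruvka's algorithm: each pass finds the minimum-weight edge leaving every component and
# merges the two components it connects, so the component count shrinks until one MST remains.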
while num_of_components > 1:
for edge in self.m_edges:
UpperCAmelCase : int = edge
UpperCAmelCase : Union[str, Any] = self.m_component[u]
UpperCAmelCase : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
UpperCAmelCase : Union[str, Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : str = edge
UpperCAmelCase : List[str] = self.m_component[u]
UpperCAmelCase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowercase_ , lowercase_ , lowercase_ )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
UpperCAmelCase : Union[str, Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def UpperCamelCase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
lowercase__ = json.load(f)
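# bleu_data maps a language pair such as "en-ru" to {"src": [...], "tgt": [...]} lists of
# parallel sentences; the regression test below translates src and scores BLEU against tgt.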
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict , lowercase_ : Dict ) -> Tuple:
return FSMTTokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Dict ) -> Tuple:
UpperCAmelCase : Optional[Any] = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def UpperCAmelCase_ ( self : List[str] , lowercase_ : int , lowercase_ : Any ) -> Optional[int]:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
UpperCAmelCase : List[str] = f"""facebook/wmt19-{pair}"""
UpperCAmelCase : Optional[int] = self.get_tokenizer(lowercase_ )
UpperCAmelCase : int = self.get_model(lowercase_ )
UpperCAmelCase : List[Any] = bleu_data[pair]['src']
UpperCAmelCase : Optional[int] = bleu_data[pair]['tgt']
UpperCAmelCase : Any = tokenizer(lowercase_ , return_tensors='pt' , truncation=lowercase_ , padding='longest' ).to(lowercase_ )
UpperCAmelCase : List[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
UpperCAmelCase : List[Any] = tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
UpperCAmelCase : Any = calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores['bleu'] , lowercase_ )
| 695 | 0 |
'''simple docstring'''
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Any = """naver-clova-ix/donut-base-finetuned-docvqa"""
UpperCAmelCase_ : List[Any] = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
UpperCAmelCase_ : Dict = """document_qa"""
UpperCAmelCase_ : Optional[Any] = AutoProcessor
UpperCAmelCase_ : str = VisionEncoderDecoderModel
UpperCAmelCase_ : int = ["""image""", """text"""]
UpperCAmelCase_ : Tuple = ["""text"""]
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Dict:
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] ) -> str:
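# Donut-style prompting: the user question is spliced into the
# <s_docvqa><s_question>...</s_question> template and the decoder is asked to
# complete the <s_answer> span.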
UpperCAmelCase : Dict = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
UpperCAmelCase : Optional[int] = task_prompt.replace('{user_input}' , lowercase_ )
UpperCAmelCase : Optional[int] = self.pre_processor.tokenizer(
lowercase_ , add_special_tokens=lowercase_ , return_tensors='pt' ).input_ids
UpperCAmelCase : Union[str, Any] = self.pre_processor(lowercase_ , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def UpperCAmelCase_ ( self : Dict , lowercase_ : Optional[Any] ) -> List[str]:
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=lowercase_ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=lowercase_ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=lowercase_ , ).sequences
def UpperCAmelCase_ ( self : str , lowercase_ : List[str] ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = self.pre_processor.batch_decode(lowercase_ )[0]
UpperCAmelCase : Union[str, Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
UpperCAmelCase : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
UpperCAmelCase : int = re.sub(R'<.*?>' , '' , lowercase_ , count=1 ).strip()  # remove first task start token
UpperCAmelCase : int = self.pre_processor.tokenajson(lowercase_ )
return sequence["answer"]
| 713 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
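# Composite configuration: instantiates the text and vision sub-configs and mirrors the
# text model's special token ids and the initializer settings at the top level.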
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 695 | 0 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=False ) -> Optional[Any]:
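# Jaccard similarity: |A intersect B| / |A union B|; with alternative_union the denominator
# is |A| + |B| instead. Example: {1, 2} vs {2, 3} gives 1/3 normally and 1/4 with
# alternative_union=True.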
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = len(set_a.intersection(UpperCAmelCase_ ) )
if alternative_union:
UpperCAmelCase : int = len(UpperCAmelCase_ ) + len(UpperCAmelCase_ )
else:
UpperCAmelCase : Optional[int] = len(set_a.union(UpperCAmelCase_ ) )
return intersection / union
if isinstance(UpperCAmelCase_ , (list, tuple) ) and isinstance(UpperCAmelCase_ , (list, tuple) ):
UpperCAmelCase : Optional[Any] = [element for element in set_a if element in set_b]
if alternative_union:
UpperCAmelCase : Dict = len(UpperCAmelCase_ ) + len(UpperCAmelCase_ )
return len(UpperCAmelCase_ ) / union
else:
UpperCAmelCase : Union[str, Any] = set_a + [element for element in set_b if element not in set_a]
return len(UpperCAmelCase_ ) / len(UpperCAmelCase_ )
return len(UpperCAmelCase_ ) / len(UpperCAmelCase_ )
return None
if __name__ == "__main__":
lowercase__ = {"a", "b", "c", "d", "e"}
lowercase__ = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
| 714 |
'''simple docstring'''
import baseaa
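# These wrappers appear to round-trip a string through one of the standard library's
# base-N codecs via UTF-8 bytes; the digits in the identifiers were mangled to 'a' in this
# dump, so whether the original was b64encode/b64decode or b85encode/b85decode is ambiguous.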
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baaencode(string.encode('utf-8' ) )
def UpperCamelCase( UpperCAmelCase_ ):
return baseaa.baadecode(UpperCAmelCase_ ).decode('utf-8' )
if __name__ == "__main__":
lowercase__ = "Hello World!"
lowercase__ = baseaa_encode(test)
print(encoded)
lowercase__ = baseaa_decode(encoded)
print(decoded)
| 695 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowercase__ = logging.getLogger(__name__)
def UpperCamelCase( UpperCAmelCase_=2 , UpperCAmelCase_=3 , UpperCAmelCase_=16 , UpperCAmelCase_ = 10 , UpperCAmelCase_ = 2 ):
def get_dataset(UpperCAmelCase_ ):
UpperCAmelCase : Dict = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(UpperCAmelCase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase : Tuple = get_dataset(UpperCAmelCase_ )
UpperCAmelCase : Any = get_dataset(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = DataLoader(UpperCAmelCase_ , shuffle=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , num_workers=4 )
UpperCAmelCase : int = DataLoader(UpperCAmelCase_ , shuffle=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ):
UpperCAmelCase : Optional[int] = []
for epoch in range(UpperCAmelCase_ ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = batch
UpperCAmelCase : str = model(UpperCAmelCase_ )
UpperCAmelCase : Any = torch.nn.functional.mse_loss(UpperCAmelCase_ , UpperCAmelCase_ )
accelerator.backward(UpperCAmelCase_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
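# The recorded random.random() draws let the tests below verify that RNG state is captured
# and restored correctly across save_state / load_state round-trips.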
if scheduler is not None:
scheduler.step()
return rands
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
super().__init__()
UpperCAmelCase : List[str] = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase : List[str] = nn.Parameter(torch.randn(1 ) )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> Optional[Any]:
return x * self.a + self.b
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase : Optional[int] = DummyModel()
UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase , UpperCAmelCase : int = dummy_dataloaders()
UpperCAmelCase : Tuple = ProjectConfiguration(total_limit=1 , project_dir=lowercase_ , automatic_checkpoint_naming=lowercase_ )
# Train baseline
UpperCAmelCase : List[Any] = Accelerator(project_config=lowercase_ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase : str = DummyModel()
UpperCAmelCase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase , UpperCAmelCase : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase : Any = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
UpperCAmelCase : str = os.path.join(lowercase_ , 'initial' )
accelerator.save_state(lowercase_ )
((UpperCAmelCase) , (UpperCAmelCase)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase : Tuple = optimizer.state_dict()
UpperCAmelCase : int = train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((UpperCAmelCase) , (UpperCAmelCase)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase : Dict = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCAmelCase : Any = DummyModel()
UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase : Union[str, Any] = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.load_state(lowercase_ )
((UpperCAmelCase) , (UpperCAmelCase)) : int = model.a.item(), model.b.item()
UpperCAmelCase : List[Any] = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
UpperCAmelCase : List[str] = train(2 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save everything
UpperCAmelCase : str = os.path.join(lowercase_ , 'checkpoint' )
accelerator.save_state(lowercase_ )
# Load everything back in and make sure all states work
accelerator.load_state(lowercase_ )
test_rands += train(1 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((UpperCAmelCase) , (UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase : Optional[Any] = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase : Tuple = DummyModel()
UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase , UpperCAmelCase : Optional[int] = dummy_dataloaders()
UpperCAmelCase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=lowercase_ )
# Train baseline
UpperCAmelCase : str = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
accelerator.save_state()
((UpperCAmelCase) , (UpperCAmelCase)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase : Union[str, Any] = optimizer.state_dict()
UpperCAmelCase : Optional[Any] = train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((UpperCAmelCase) , (UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase : Tuple = optimizer.state_dict()
# Train partially
set_seed(42 )
UpperCAmelCase : Dict = DummyModel()
UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = dummy_dataloaders()
UpperCAmelCase : int = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowercase_ )
UpperCAmelCase : str = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
accelerator.load_state(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase) , (UpperCAmelCase)) : str = model.a.item(), model.b.item()
UpperCAmelCase : Optional[Any] = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
UpperCAmelCase : str = train(2 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
((UpperCAmelCase) , (UpperCAmelCase)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase : Tuple = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
UpperCAmelCase : str = torch.tensor([1, 2, 3] )
UpperCAmelCase : str = torch.tensor([2, 3, 4] )
UpperCAmelCase : Any = DummyModel()
UpperCAmelCase : Optional[int] = torch.optim.Adam(net.parameters() )
UpperCAmelCase : int = Accelerator()
with self.assertRaises(lowercase_ ) as ve:
accelerator.register_for_checkpointing(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase : Tuple = DummyModel()
UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase : Any = torch.optim.lr_scheduler.StepLR(lowercase_ , step_size=1 , gamma=0.99 )
UpperCAmelCase , UpperCAmelCase : Optional[int] = dummy_dataloaders()
UpperCAmelCase : int = ProjectConfiguration(automatic_checkpoint_naming=lowercase_ )
# Train baseline
UpperCAmelCase : Dict = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save initial
accelerator.save_state()
UpperCAmelCase : Dict = scheduler.state_dict()
train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
self.assertNotEqual(lowercase_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(lowercase_ , scheduler.state_dict() )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
UpperCAmelCase : Optional[Any] = DummyModel()
UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=lowercase_ , total_limit=2 )
# Train baseline
UpperCAmelCase : str = Accelerator(project_dir=lowercase_ , project_config=lowercase_ )
UpperCAmelCase : Dict = accelerator.prepare(lowercase_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
UpperCAmelCase : Any = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
if __name__ == "__main__":
lowercase__ = "/tmp/accelerate/state_checkpointing"
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowercase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowercase__ , lowercase__ = dummy_dataloaders()
lowercase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowercase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowercase__ , lowercase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowercase__ = group["params"][0].device
break
assert param_device.type == accelerator.device.type
lowercase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
lowercase__ = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
lowercase__ = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(UpperCAmelCase_ , UpperCAmelCase_ ) ) )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if dataset.ndim != value_array.ndim:
UpperCAmelCase : str = (
'Wrong input data\'s dimensions... '
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(UpperCAmelCase_ )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCAmelCase : str = (
'Wrong input data\'s shape... '
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(UpperCAmelCase_ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('Wrong shape' )
if dataset.dtype != value_array.dtype:
UpperCAmelCase : List[str] = (
'Input data have different datatype... '
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(UpperCAmelCase_ )
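# Naive nearest-neighbour scan: each query vector is compared against every dataset row by
# Euclidean distance and the closest row is kept, i.e. O(len(value_array) * len(dataset)).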
UpperCAmelCase : str = []
for value in value_array:
UpperCAmelCase : Optional[Any] = euclidean(UpperCAmelCase_ , dataset[0] )
UpperCAmelCase : Tuple = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCAmelCase : Tuple = euclidean(UpperCAmelCase_ , UpperCAmelCase_ )
if dist > temp_dist:
UpperCAmelCase : List[str] = temp_dist
UpperCAmelCase : str = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return np.dot(UpperCAmelCase_ , UpperCAmelCase_ ) / (norm(UpperCAmelCase_ ) * norm(UpperCAmelCase_ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
lowercase__ = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class A_ :
'''simple docstring'''
def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : List[Any] ) -> Dict:
UpperCAmelCase : List[str] = graph
# mapping node to its parent in resulting breadth first tree
UpperCAmelCase : dict[str, str | None] = {}
UpperCAmelCase : Dict = source_vertex
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
UpperCAmelCase : Dict = {self.source_vertex}
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : List[Any] = [self.source_vertex] # first in first out queue
while queue:
UpperCAmelCase : List[str] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowercase_ )
UpperCAmelCase : Dict = vertex
queue.append(lowercase_ )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Dict ) -> List[Any]:
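# Walks the BFS parent map backwards from target_vertex, building the path string
# recursively; raises ValueError if the target was never reached from the source.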
if target_vertex == self.source_vertex:
return self.source_vertex
UpperCAmelCase : Any = self.parent.get(lowercase_ )
if target_vertex_parent is None:
UpperCAmelCase : List[str] = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(lowercase_ )
return self.shortest_path(lowercase_ ) + f"""->{target_vertex}"""
if __name__ == "__main__":
lowercase__ = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 716 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
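# Registering the CustomConfig/CustomFeatureExtractor pair wires them into the auto-mapping;
# the finally-block below removes them again so the registration does not leak across tests.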
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 0 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ ):
if length <= 0 or not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError('Length must be a positive integer.' )
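# The n-th hexagonal number is n * (2n - 1); for n = 0 .. length - 1 this yields
# [0, 1, 6, 15, 28, ...].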
return [n * (2 * n - 1) for n in range(UpperCAmelCase_ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 717 |
'''simple docstring'''
from datetime import datetime
import requests
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Tuple = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
UpperCAmelCase : List[str] = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(UpperCAmelCase_ ).content
if __name__ == "__main__":
lowercase__ = input("Enter Video/IGTV url: ").strip()
lowercase__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 0 |
def UpperCamelCase( ):
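# Project Euler 48: the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000.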
UpperCAmelCase : str = 0
for i in range(1 , 10_01 ):
total += i**i
return str(total)[-10:]
if __name__ == "__main__":
print(solution())
| 718 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10**9 ):
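# Appears to be Project Euler 94 (almost equilateral triangles with integral area): side
# lengths follow a Pell-like recurrence and each qualifying perimeter is accumulated until
# it exceeds max_perimeter. The variable roles are obscured by the placeholder mangling.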
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Optional[int] = 2
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : List[Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
UpperCAmelCase : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A_ :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : Optional[Any] , ) -> int:
UpperCAmelCase : Union[str, Any] = parent
UpperCAmelCase : int = 13
UpperCAmelCase : Dict = 7
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = True
UpperCAmelCase : List[Any] = 99
UpperCAmelCase : Dict = 32
UpperCAmelCase : str = 2
UpperCAmelCase : List[str] = 4
UpperCAmelCase : int = 37
UpperCAmelCase : Optional[Any] = 'gelu'
UpperCAmelCase : List[str] = 0.1
UpperCAmelCase : str = 0.1
UpperCAmelCase : List[str] = 512
UpperCAmelCase : Any = 16
UpperCAmelCase : str = 2
UpperCAmelCase : Optional[Any] = 0.02
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Tuple = 4
UpperCAmelCase : Any = None
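# The tester mirrors DistilBertConfig hyper-parameters at miniature scale so each model
# head can be built and shape-checked quickly on random input ids below.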
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : List[Any] = None
if self.use_input_mask:
UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : List[str] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : str , lowercase_ : List[str] , lowercase_ : List[Any] ) -> List[Any]:
UpperCAmelCase : List[str] = TFDistilBertModel(config=lowercase_ )
UpperCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
UpperCAmelCase : List[Any] = model(lowercase_ )
UpperCAmelCase : Optional[int] = [input_ids, input_mask]
UpperCAmelCase : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Any ) -> Optional[Any]:
UpperCAmelCase : List[str] = TFDistilBertForMaskedLM(config=lowercase_ )
UpperCAmelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask}
UpperCAmelCase : Any = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Optional[int] ) -> Optional[int]:
UpperCAmelCase : Dict = TFDistilBertForQuestionAnswering(config=lowercase_ )
UpperCAmelCase : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
UpperCAmelCase : Dict = model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : str , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[Any] ) -> str:
UpperCAmelCase : Union[str, Any] = self.num_labels
UpperCAmelCase : Union[str, Any] = TFDistilBertForSequenceClassification(lowercase_ )
UpperCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
UpperCAmelCase : List[Any] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : str , lowercase_ : int , lowercase_ : Optional[int] ) -> Dict:
UpperCAmelCase : Any = self.num_choices
UpperCAmelCase : List[Any] = TFDistilBertForMultipleChoice(lowercase_ )
UpperCAmelCase : Optional[int] = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : int = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase : Optional[Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
UpperCAmelCase : Optional[Any] = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Dict , lowercase_ : Any , lowercase_ : str ) -> Tuple:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : int = TFDistilBertForTokenClassification(lowercase_ )
UpperCAmelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
UpperCAmelCase : Any = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
UpperCAmelCase : int = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Tuple = config_and_inputs
UpperCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Order the vertices of `graph` by depth-first finish time, starting at `vert`."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS ordering on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
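

if __name__ == "__main__":
    # Minimal demo (added for illustration, not part of the original module):
    # run Kosaraju's algorithm on the two test graphs defined at the top.
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]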
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    """Split the input on any character that is not alphanumeric or whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("doctest").testmod()
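
# Examples (illustrative; the outputs follow from the functions above):
#
#     to_pascal_case("hello world")        # -> "HelloWorld"
#     to_camel_case("hello world")         # -> "helloWorld"
#     to_snake_case("hello world", True)   # -> "HELLO_WORLD"
#     to_kebab_case("hello world", False)  # -> "hello-world"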
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
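

# Usage sketch (illustrative, not part of the original module):
#
#     config = AutoformerConfig(prediction_length=24)
#     config.context_length  # -> 24 (falls back to prediction_length)
#     config.feature_size    # -> 9 with the defaults: 1 * 7 lags + 2 scaling features + 0 embeddings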
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
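
    # A possible next step (illustrative, not in the original script): the
    # predictions are still in the scaler's [0, 1] range, so keep a reference to
    # the fitted MinMaxScaler if you want to map them back to the original units
    # with scaler.inverse_transform(pred).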
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all n-grams of length `ngram_size` from `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
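
# Example (illustrative): all bigrams of a short string.
#
#     create_ngram("I am", 2)  # -> ['I ', ' a', 'am']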
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
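

# Usage sketch (illustrative; assumes access to the Hugging Face Hub):
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     tokenizer("Hello world")["input_ids"]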
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    alpha_big_a = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + alpha_big_a)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - alpha_big_a)
    a0 = ppmc + alpha_big_a
    a1 = -2 * pmpc
    a2 = ppmc - alpha_big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    alpha_big_a = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + alpha_big_a)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - alpha_big_a)
    a0 = pmc + alpha_big_a
    a1 = 2 * mpc
    a2 = pmc - alpha_big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
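

# Usage sketch (illustrative): `IIRFilter.process` in the accompanying
# `audio_filters.iir_filter` module filters one sample at a time.
#
#     filt = make_lowpass(1_000, 48_000)
#     filtered = [filt.process(sample) for sample in samples]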
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
lowercase__ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts,
        src_lang="en_XX",
        tgt_texts=None,
        tgt_lang="ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
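

# Usage sketch (illustrative; assumes access to the Hugging Face Hub):
#
#     tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")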
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
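

# Usage sketch (illustrative; assumes access to the Hugging Face Hub):
#
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     tokenizer("Hello world")["input_ids"]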
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
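

# Usage sketch (illustrative): `PipelineTool` instances are callable; `audio`
# would be a raw waveform array, and the checkpoint is downloaded on first use.
#
#     tool = SpeechToTextTool()
#     transcript = tool(audio)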
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
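

# Usage sketch (illustrative): the defaults above correspond to a small Whisper
# model; any field can be overridden by keyword.
#
#     config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#     config.max_source_positions  # -> 1500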
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
parser.add_argument(
        "--config_file", default=default_json_config_file, help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
        "--mixed_precision", choices=["no", "fp16", "bf16"], type=str, help="Whether or not to use mixed precision training. "
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=default_config_command)
return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
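

# Minimal usage sketch (the save path is hypothetical, and the exact import
# path can vary between accelerate versions):
#   from accelerate.utils import write_basic_config
#   write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_config.json")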
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    # Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    # Req = R1 + R2 + ... + Rn
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
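
    # Example with assumed values: the same three resistors combined in
    # parallel and in series.
    print(resistor_parallel([3.0, 6.0, 9.0]))  # -> 1.6363... (1 / (1/3 + 1/6 + 1/9))
    print(resistor_series([3.0, 6.0, 9.0]))  # -> 18.0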
| 707 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 695 | 0 |
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
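
    # A few spot checks, including a negative input:
    print(decimal_to_binary(0))  # 0b0
    print(decimal_to_binary(40))  # 0b101000
    print(decimal_to_binary(-40))  # -0b101000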
| 708 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        """PyTorch-backed HANS dataset with on-disk feature caching."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        """TensorFlow-backed HANS dataset exposed as a tf.data.Dataset."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10_000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a list of ``InputExample`` into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
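

# Minimal usage sketch (the data path and checkpoint below are hypothetical;
# the HANS txt files must be on disk):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = HansDataset("path/to/hans", tokenizer, task="hans", max_seq_length=128)
#   print(len(dataset), dataset.get_labels())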
| 695 | 0 |
'''simple docstring'''
speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        valid_units = ", ".join(speed_chart_inverse)
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {valid_units}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
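
    # Spot checks: 100 km/h expressed in m/s and mph.
    print(convert_speed(100, "km/h", "m/s"))  # 27.778
    print(convert_speed(100, "km/h", "mph"))  # 62.137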
| 709 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    # Count expansions of sqrt(2) whose numerator has more digits than the denominator.
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f'''{solution() = }''')
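    # Sanity check: the 8th expansion, 1393/985, is the first whose numerator
    # has more digits than its denominator.
    assert solution(8) == 1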
| 695 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16],
        kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0,
        qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0,
        out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
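

# Usage sketch: pair the config with a randomly initialized model (class names
# as in the transformers library).
#   from transformers import DinatModel
#   config = DinatConfig()
#   model = DinatModel(config)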
| 710 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    # Sum of the decimal digits of 2**power.
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
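    # Sanity check: 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.
    assert solution(15) == 26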
| 695 | 0 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
| 711 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16,
        image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-5, attention_dropout=0.0,
        initializer_range=1e-10, qkv_bias=True, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: PretrainedConfig, **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(), **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
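

# Usage sketch: compose a BLIP-2 config from its sub-configs (OPTConfig as the
# default text backbone) and round-trip it through `to_dict`.
#   from transformers import OPTConfig
#   config = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#   )
#   assert config.to_dict()["model_type"] == "blip-2"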
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal cell
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
return path
if __name__ == "__main__":
lowercase__ = (0, 0)
lowercase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
lowercase__ = GreedyBestFirst(init, goal)
lowercase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase__ = 2
for elem in grid:
print(elem)
| 712 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 695 | 0 |
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 713 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, vocab_size=50_244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12,
        relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1,
        layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0,
        use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings, is_decoder=is_decoder, **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12,
        num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0,
        attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096,
        relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02,
        is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
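

# Usage sketch: build the composite config from its parts and round-trip it.
#   config = Pix2StructConfig.from_text_vision_configs(
#       Pix2StructTextConfig(), Pix2StructVisionConfig()
#   )
#   assert config.to_dict()["model_type"] == "pix2struct"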
| 695 | 0 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
| 714 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 695 | 0 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
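
    # Example: a solvable 3x3 maze (0 = free cell, 1 = obstacle).
    solve_maze([[0, 1, 0], [0, 0, 0], [1, 1, 0]])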
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
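
    # Example with assumed values: the nearest neighbour of a single query
    # vector in a tiny dataset.
    dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
    value_array = np.array([[0, 0, 1]])
    print(similarity_search(dataset, value_array))  # [[[0, 0, 0], 1.0]]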
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 716 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 695 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
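

# Usage sketch: instantiate a randomly initialized ViT-MSN model from this
# config (model class name as in the transformers library).
#   from transformers import ViTMSNModel
#   model = ViTMSNModel(ViTMSNConfig())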
| 717 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
def __init__( self : Union[str, Any] , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = None , lowercase_ : Optional[Union[str, bool]] = "mean" , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : bool = True , lowercase_ : str = "gelu" , lowercase_ : float = 0.05 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : List[str]=True , lowercase_ : str = "prob" , lowercase_ : int = 5 , lowercase_ : bool = True , **lowercase_ : Optional[Any] , ) -> List[Any]:
# time series specific configuration
UpperCAmelCase : List[Any] = prediction_length
UpperCAmelCase : Union[str, Any] = context_length or prediction_length
UpperCAmelCase : int = distribution_output
UpperCAmelCase : Tuple = loss
UpperCAmelCase : Optional[int] = input_size
UpperCAmelCase : Dict = num_time_features
UpperCAmelCase : Dict = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase : int = scaling
UpperCAmelCase : Tuple = num_dynamic_real_features
UpperCAmelCase : Optional[Any] = num_static_real_features
UpperCAmelCase : Optional[int] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : str = cardinality
else:
UpperCAmelCase : int = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : Any = embedding_dimension
else:
UpperCAmelCase : List[str] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase : Optional[int] = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase : List[str] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase : Optional[int] = d_model
UpperCAmelCase : Optional[int] = encoder_attention_heads
UpperCAmelCase : Dict = decoder_attention_heads
UpperCAmelCase : List[Any] = encoder_ffn_dim
UpperCAmelCase : Optional[Any] = decoder_ffn_dim
UpperCAmelCase : Tuple = encoder_layers
UpperCAmelCase : List[Any] = decoder_layers
UpperCAmelCase : Dict = dropout
UpperCAmelCase : Dict = attention_dropout
UpperCAmelCase : Optional[int] = activation_dropout
UpperCAmelCase : Optional[Any] = encoder_layerdrop
UpperCAmelCase : int = decoder_layerdrop
UpperCAmelCase : Optional[Any] = activation_function
UpperCAmelCase : str = init_std
UpperCAmelCase : Optional[int] = use_cache
# Informer
UpperCAmelCase : List[str] = attention_type
UpperCAmelCase : Optional[Any] = sampling_factor
UpperCAmelCase : Dict = distil
        super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
    def _number_of_features( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
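        # Hand-computed example (added; not in the original file): with the defaults
        # above -- input_size=1, num_time_features=0, no static/dynamic real features
        # and cardinality [0], hence embedding_dimension [0] -- the property returns
        # 0 + 0 + 0 + 0 + 1 * 2 = 2 additional features per time step.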
| 718 |
'''simple docstring'''
def solution( max_perimeter = 10**9 ):
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
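# Quick sanity check (hand-derived, not in the original file): the first two
# almost-equilateral triangles with integral area are (5, 5, 6) and
# (17, 17, 16), so solution(100) is expected to return 16 + 50 = 66.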
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key( k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith('encoder' ):
        k = k.replace('.attn' , '.self_attn' )
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'final_layer_norm' )
    elif k.startswith('decoder' ):
        k = k.replace('norm1' , 'self_attn_layer_norm' )
        k = k.replace('norm2' , 'encoder_attn_layer_norm' )
        k = k.replace('norm3' , 'final_layer_norm' )
    return k
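# Illustrative trace (hand-worked, not from the original script): a ParlAI key
# such as "encoder.layers.0.attention.q_lin.weight" first becomes
# "encoder.layers.0.attn.q_proj.weight" via PATTERNS, then the encoder branch
# rewrites ".attn" to ".self_attn", giving
# "encoder.layers.0.self_attn.q_proj.weight".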
def rename_layernorm_keys( sd ):
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('layernorm_embedding' , 'layer_norm' )
        assert new_k not in sd
        sd[new_k] = v
lowercase__ = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location='cpu' )
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 719 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
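        # Note (added for clarity): comparing fixed 3x3 corner slices against
        # hard-coded values is a lightweight regression check; enable_full_determinism()
        # together with the seeded generators keeps the pipeline output reproducible
        # within the 1e-2 tolerance (3e-2 on MPS) asserted above.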
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
        UpperCAmelCase : Optional[int] = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 0 |
'''simple docstring'''
def move_tower( height , from_pole , to_pole , with_pole ):
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk( from_pole , to_pole ):
    print('moving disk from' , from_pole , 'to' , to_pole )
def main( ):
    height = int(input('Height of hanoi: ' ).strip() )
    move_tower(height , 'A' , 'B' , 'C' )
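# Hand-traced example (added; not in the original script): move_tower(2, "A", "B", "C")
# prints "moving disk from A to C", then "A to B", then "C to B" --
# 2**2 - 1 = 3 moves, matching the classic 2**n - 1 bound.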
if __name__ == "__main__":
main()
| 720 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 0 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase__ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
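# Illustrative example (added; assumed output shape, matching the two capture
# groups above):
# _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]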
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"DecisionTransformerConfig",
"EncoderDecoderConfig",
"MusicgenConfig",
"RagConfig",
"SpeechEncoderDecoderConfig",
"TimmBackboneConfig",
"VisionEncoderDecoderConfig",
"VisionTextDualEncoderConfig",
"LlamaConfig",
}
def get_checkpoint_from_config_class( config_class ):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('/' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints( ):
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint ) )
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 721 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( PretrainedConfig ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = """autoformer"""
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Dict , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase_ : bool = True , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : bool = True , lowercase_ : Union[str, Any]=True , lowercase_ : int = 10 , lowercase_ : int = 25 , lowercase_ : int = 3 , **lowercase_ : str , ) -> Dict:
# time series specific configuration
UpperCAmelCase : int = prediction_length
UpperCAmelCase : Optional[Any] = context_length if context_length is not None else prediction_length
UpperCAmelCase : List[Any] = distribution_output
UpperCAmelCase : Tuple = loss
UpperCAmelCase : Dict = input_size
UpperCAmelCase : Dict = num_time_features
UpperCAmelCase : Tuple = lags_sequence
UpperCAmelCase : str = scaling
UpperCAmelCase : Optional[int] = num_dynamic_real_features
UpperCAmelCase : List[str] = num_static_real_features
UpperCAmelCase : Optional[int] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : int = cardinality
else:
UpperCAmelCase : Union[str, Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : Any = embedding_dimension
else:
UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase : Dict = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : Dict = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase : str = decoder_ffn_dim
UpperCAmelCase : str = encoder_layers
UpperCAmelCase : Optional[Any] = decoder_layers
UpperCAmelCase : int = dropout
UpperCAmelCase : Any = attention_dropout
UpperCAmelCase : Tuple = activation_dropout
UpperCAmelCase : str = encoder_layerdrop
UpperCAmelCase : Union[str, Any] = decoder_layerdrop
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Dict = init_std
UpperCAmelCase : Union[str, Any] = use_cache
# Autoformer
UpperCAmelCase : Any = label_length
UpperCAmelCase : List[Any] = moving_average
UpperCAmelCase : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
    def _number_of_features( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 695 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxBertModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        model = FlaxBertModel.from_pretrained('bert-base-cased' )
        outputs = model(np.ones((1, 1) ) )
        self.assertIsNotNone(outputs )
| 700 |
'''simple docstring'''
def create_ngram( sentence , ngram_size ):
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
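# Hand-checked example (added): create_ngram("I am a sentence", 2) returns the
# character bigrams ['I ', ' a', 'am', 'm ', ...] -- every length-2 window of
# the input string.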
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
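        # Worked example (added): with the defaults above (image_size=30,
        # patch_size=2), (30 // 2) ** 2 = 225 patches, so seq_length = 226
        # once the [CLS] token is counted.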
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = ViTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_interpolate_pos_encoding( self ):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained('facebook/dino-vits8' ).to(torch_device )
        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True )
        # verify the logits
        expected_shape = torch.Size((1, 3_601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16( self ):
        model = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.float16 , device_map='auto' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 701 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
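# Minimal usage sketch (added; assumes the IIRFilter.process() API from
# audio_filters.iir_filter in this repository):
#   filt = make_lowpass(1_000, 48_000)
#   processed = [filt.process(sample) for sample in samples]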
| 695 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowercase__ = logging.get_logger(__name__)
class A_ ( BaseImageProcessor ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ["""pixel_values"""]
def __init__( self : List[Any] , lowercase_ : Dict = True , lowercase_ : Dict = None , lowercase_ : Tuple = PILImageResampling.BILINEAR , lowercase_ : int = True , lowercase_ : List[Any] = 1 / 255 , lowercase_ : Optional[int] = True , lowercase_ : List[Any] = None , lowercase_ : Optional[int] = True , **lowercase_ : List[str] , ) -> Dict:
super().__init__(**lowercase__ )
UpperCAmelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase : Optional[int] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"height": 256, "width": 256}
UpperCAmelCase : Dict = get_size_dict(lowercase__ , param_name='crop_size' )
UpperCAmelCase : Union[str, Any] = do_resize
UpperCAmelCase : Union[str, Any] = size
UpperCAmelCase : List[str] = resample
UpperCAmelCase : Dict = do_rescale
UpperCAmelCase : List[Any] = rescale_factor
UpperCAmelCase : Optional[Any] = do_center_crop
UpperCAmelCase : Dict = crop_size
UpperCAmelCase : str = do_flip_channel_order
def UpperCAmelCase_ ( self : Any , lowercase_ : int , lowercase_ : int , lowercase_ : List[Any] = PIL.Image.BILINEAR , lowercase_ : Union[str, Any] = None , **lowercase_ : Optional[Any] , ) -> List[str]:
UpperCAmelCase : str = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase : List[Any] = get_resize_output_image_size(lowercase__ , size=size['shortest_edge'] , default_to_square=lowercase__ )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def UpperCAmelCase_ ( self : Dict , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Union[str, Any] = None , **lowercase_ : Dict , ) -> Tuple:
UpperCAmelCase : List[Any] = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(lowercase__ , size=(size['height'], size['width']) , data_format=lowercase__ , **lowercase__ )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Any = None , **lowercase_ : List[Any] , ) -> Union[str, Any]:
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def UpperCAmelCase_ ( self : Dict , lowercase_ : List[str] , lowercase_ : Optional[int] = None ) -> Union[str, Any]:
return flip_channel_order(lowercase__ , data_format=lowercase__ )
def UpperCAmelCase_ ( self : Any , lowercase_ : List[str] , lowercase_ : str = None , lowercase_ : str = None , lowercase_ : Any = None , lowercase_ : Optional[Any] = None , lowercase_ : List[str] = None , lowercase_ : Any = None , lowercase_ : int = None , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : List[Any] = ChannelDimension.FIRST , **lowercase_ : str , ) -> List[Any]:
UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : str = resample if resample is not None else self.resample
UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
UpperCAmelCase : Tuple = size if size is not None else self.size
UpperCAmelCase : List[str] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase : Tuple = get_size_dict(lowercase__ , param_name='crop_size' )
UpperCAmelCase : Optional[Any] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
# All transformations expect numpy arrays.
UpperCAmelCase : Any = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
UpperCAmelCase : Optional[Any] = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
UpperCAmelCase : int = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
UpperCAmelCase : str = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
UpperCAmelCase : Optional[Any] = [self.flip_channel_order(image=lowercase__ ) for image in images]
UpperCAmelCase : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
UpperCAmelCase : int = {"pixel_values": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] = None ) -> int:
UpperCAmelCase : List[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowercase__ ):
UpperCAmelCase : int = target_sizes.numpy()
UpperCAmelCase : List[Any] = []
for idx in range(len(lowercase__ ) ):
UpperCAmelCase : Tuple = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase__ )
UpperCAmelCase : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase__ )
else:
UpperCAmelCase : Any = logits.argmax(dim=1 )
UpperCAmelCase : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
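    # Minimal usage sketch (added; names are illustrative): given `outputs` from a
    # semantic-segmentation head and a matching `image_processor`,
    #   maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])
    # yields one (height, width) tensor of per-pixel class indices per image.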
| 702 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
class LRUCache ( Generic[T] ):
'''simple docstring'''
    dq_store : deque[T] # Cache store of keys
    key_reference : set[T] # References of the keys in cache
    _MAX_CAPACITY : int = 10 # Maximum capacity of cache
    def __init__( self , n : int ) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x : T ) -> None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ) -> None:
        for k in self.dq_store:
            print(k )
def __repr__( self : Union[str, Any] ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
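# Why [5, 4, 'A', 3] (added walkthrough): with capacity 4, referring 5 evicts
# the least-recently-used key (2) from the right end of the deque, and each
# refer() appends on the left, leaving most-recent-first order 5, 4, 'A', 3.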
| 695 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase : Dict = mock.Mock()
UpperCAmelCase : List[str] = 500
UpperCAmelCase : List[str] = {}
UpperCAmelCase : Union[str, Any] = HTTPError
UpperCAmelCase : Any = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase : int = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=lowercase_ ) as mock_head:
UpperCAmelCase : Any = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
UpperCAmelCase : Any = mock.Mock()
UpperCAmelCase : Dict = 500
UpperCAmelCase : str = {}
UpperCAmelCase : Tuple = HTTPError
UpperCAmelCase : Optional[int] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase : int = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=lowercase_ ) as mock_head:
UpperCAmelCase : Union[str, Any] = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
try:
UpperCAmelCase : Dict = tempfile.mktemp()
with open(lowercase_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , lowercase_ )
UpperCAmelCase : Any = AlbertTokenizer.from_pretrained(lowercase_ )
finally:
os.remove(lowercase_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , lowercase_ )
UpperCAmelCase : int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def UpperCAmelCase_ ( self : int ) -> str:
UpperCAmelCase : Optional[int] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class A_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def UpperCAmelCase_ ( cls : str ) -> List[Any]:
UpperCAmelCase : str = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def UpperCAmelCase_ ( cls : List[str] ) -> Dict:
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : str = os.path.join(lowercase_ , 'vocab.txt' )
with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCAmelCase : List[Any] = BertTokenizer(lowercase_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCAmelCase : Optional[Any] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ , repo_id='test-tokenizer' , push_to_hub=lowercase_ , use_auth_token=self._token )
UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def UpperCAmelCase_ ( self : int ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : Union[str, Any] = os.path.join(lowercase_ , 'vocab.txt' )
with open(lowercase_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCAmelCase : Optional[Any] = BertTokenizer(lowercase_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCAmelCase : Union[str, Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowercase_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=lowercase_ , use_auth_token=self._token )
UpperCAmelCase : Dict = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizerFast')
        tokenizer = AutoTokenizer.from_pretrained(
            f"""{USER}/test-dynamic-tokenizer""", use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')
class TrieTest(unittest.TestCase):
    '''simple docstring'''
    def test_trie(self):
        trie = Trie()
        trie.add('Hello 友達')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
        trie.add('Hello')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS] This is a extra_id_100'])
        trie.add('[CLS]')
        trie.add('extra_id_1')
        trie.add('extra_id_100')
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS]', ' This is a ', 'extra_id_100'])

    def test_trie_single(self):
        trie = Trie()
        trie.add('A')
        self.assertEqual(trie.split('ABC'), ['A', 'BC'])
        self.assertEqual(trie.split('BCA'), ['BC', 'A'])

    def test_trie_final(self):
        trie = Trie()
        trie.add('TOKEN]')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add('A')
        trie.add('P')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add('AB')
        trie.add('B')
        trie.add('C')
        self.assertEqual(trie.split('ABC'), ['AB', 'C'])

    def test_trie_skip(self):
        trie = Trie()
        trie.add('ABC')
        trie.add('B')
        trie.add('CD')
        self.assertEqual(trie.split('ABCD'), ['ABC', 'D'])

    def test_cut_text_hardening(self):
        trie = Trie()
        parts = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ['AB', 'C'])
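
    def test_trie_longest_match_sketch(self):
        # Added illustration (not part of the original suite): Trie.split is
        # greedy longest-match, so when one added token is a prefix of another,
        # the longer token wins at the same start position.
        trie = Trie()
        trie.add('AB')
        trie.add('ABC')
        self.assertEqual(trie.split('ABCX'), ['ABC', 'X'])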
| 703 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
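
# Usage sketch (added, illustrative; requires network access to the Hub):
# tokenizer = GPTNeoXTokenizerFast.from_pretrained('EleutherAI/gpt-neox-20b')
# print(tokenizer('Hello world').input_ids)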
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a, precision=10**-10):
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find root of log(x) - 1
print(f'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
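    # Worked step (added, illustrative): for f(x) = x**2 - 2 starting at x = 1.5,
    # one Newton update gives x - f(x)/f'(x) = 1.5 - 0.25 / 3.0 = 1.41666...,
    # already within 2.5e-3 of the true root sqrt(2) = 1.41421...; convergence
    # is quadratic near a simple root.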
| 704 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    '''simple docstring'''
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors='pt').input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
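
# Usage sketch (added, illustrative; `audio` would be a raw waveform sampled at
# 16 kHz, and instantiating the tool downloads openai/whisper-base):
# tool = SpeechToTextTool()
# transcription = tool(audio)  # __call__ runs encode -> forward -> decode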
| 695 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

TA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class TaConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """t5"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__(self, vocab_size=32_128, d_model=512, d_kv=64, d_ff=2_048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1E-6, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )


class TaOnnxConfig(OnnxSeqaSeqConfigWithPast):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
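
# Illustrative check (added; the relative imports above mean this runs inside
# the transformers package, where TaConfig is this dump's name for T5Config):
# cfg = TaConfig(feed_forward_proj='gated-gelu')
# assert cfg.is_gated_act and cfg.dense_act_fn == 'gelu_new'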
| 705 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """whisper"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(self, vocab_size=51_865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1_536, encoder_ffn_dim=1_536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50_257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1_500, max_target_positions=448, pad_token_id=50_256, bos_token_id=50_256, eos_token_id=50_256, suppress_tokens=None, begin_suppress_tokens=[220, 50_256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )


class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 22_050, time_duration: float = 5.0, frequency: int = 220, ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1E-3
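
# Note (added): the `encoder_sequence_length // 2` above mirrors Whisper's
# convolutional front-end, whose second conv layer has stride 2, halving the
# mel-frame count: e.g. 30 s of audio -> 3000 mel frames -> 1500 encoder
# positions, matching the max_source_positions=1_500 default.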
| 695 | 0 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / 'cache'
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'train': jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({'train': jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = 'train'
        path = {'train': jsonl_path, 'test': jsonl_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys') and not hasattr(exported_content[0], 'keys')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys') and not hasattr(exported_content[0], 'keys')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_invalid_num_proc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp('data') / f"""test.json.{extension}""")
        original_path = str(shared_datadir / f"""test_file.json.{extension}""")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, 'rb', compression='infer') as f:
            exported_content = f.read()
        with fsspec.open(original_path, 'rb', compression='infer') as f:
            original_content = f.read()
        assert exported_content == original_content
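
# Round-trip sketch (added, illustrative path; mirrors the reader/writer pair
# exercised above):
# ds = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2], 'col_3': [1.0, 2.0]})
# JsonDatasetWriter(ds, 'out.jsonl', lines=True).write()
# reloaded = JsonDatasetReader('out.jsonl').read()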
| 706 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowercase__ = "Create a default config file for Accelerate with only a few flags set."
def UpperCamelCase( UpperCAmelCase_="no" , UpperCAmelCase_ = default_json_config_file , UpperCAmelCase_ = False ):
UpperCAmelCase : Any = Path(UpperCAmelCase_ )
path.parent.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
UpperCAmelCase : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
UpperCAmelCase : Dict = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase : Dict = torch.cuda.device_count()
UpperCAmelCase : List[Any] = num_gpus
UpperCAmelCase : List[Any] = False
if num_gpus > 1:
UpperCAmelCase : Tuple = 'MULTI_GPU'
else:
UpperCAmelCase : Optional[Any] = 'NO'
elif is_xpu_available() and use_xpu:
UpperCAmelCase : Optional[int] = torch.xpu.device_count()
UpperCAmelCase : Optional[int] = num_xpus
UpperCAmelCase : Any = False
if num_xpus > 1:
UpperCAmelCase : Tuple = 'MULTI_XPU'
else:
UpperCAmelCase : str = 'NO'
elif is_npu_available():
UpperCAmelCase : Optional[int] = torch.npu.device_count()
UpperCAmelCase : str = num_npus
UpperCAmelCase : int = False
if num_npus > 1:
UpperCAmelCase : int = 'MULTI_NPU'
else:
UpperCAmelCase : List[str] = 'NO'
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = True
UpperCAmelCase : str = 1
UpperCAmelCase : str = 'NO'
UpperCAmelCase : Any = ClusterConfig(**UpperCAmelCase_ )
config.to_json_file(UpperCAmelCase_ )
return path
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = parser.add_parser('default' , parents=UpperCAmelCase_ , help=UpperCAmelCase_ , formatter_class=UpperCAmelCase_ )
parser.add_argument(
'--config_file' , default=UpperCAmelCase_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=UpperCAmelCase_ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=UpperCAmelCase_ )
return parser
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 695 | 0 |
'''simple docstring'''
def decimal_isolate(number, digit_amount):
    """Isolate the decimal part of `number`, rounded to `digit_amount` places if positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
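# Note (added): the function operates on binary floats, so results may carry
# representation error (e.g. the decimal part of 1.53 may print as
# 0.53000000000000... rather than exactly 0.53); use decimal.Decimal when exact
# decimal arithmetic matters.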
| 707 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    '''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , FutureWarning , )
        super().__init__(*args, **kwargs)
| 695 | 0 |
'''simple docstring'''
from math import sqrt
def solution(limit=1_000_000):
    num_cuboids = 0
    max_cuboid_size = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
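# Worked check (added): the classic 6x5x3 cuboid has shortest surface path
# sqrt((5 + 3)**2 + 6**2) = 10. The loop above reaches it at max_cuboid_size = 6,
# sum_shortest_sides = 8, adding min(6, 8 // 2) - max(1, 8 - 6) + 1 = 3
# cuboids: (2, 6, 6), (3, 5, 6) and (4, 4, 6).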
| 708 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    '''simple docstring'''
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    '''simple docstring'''
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        '''simple docstring'''
        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, evaluate: bool = False, ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir, 'cached_{}_{}_{}_{}'.format(
                    'dev' if evaluate else 'train', tokenizer.__class__.__name__, str(max_seq_length), task, ), )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"""Loading features from cached file {cached_features_file}""")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"""Creating features from dataset file at {data_dir}""")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info('Training examples: %s', len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info('Saving features into cached file %s', cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        '''simple docstring'''
        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False, ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc='convert examples to features'):
                    if ex_index % 10_000 == 0:
                        logger.info('Writing example %d of %d' % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen, (
                    {
                        'example_id': tf.int32,
                        'input_ids': tf.int32,
                        'attention_mask': tf.int32,
                        'token_type_ids': tf.int32,
                    },
                    tf.int64,
                ), (
                    {
                        'example_id': tf.TensorShape([]),
                        'input_ids': tf.TensorShape([None, None]),
                        'attention_mask': tf.TensorShape([None, None]),
                        'token_type_ids': tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ), )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    '''simple docstring'''
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_train_set.txt')), 'train')

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_evaluation_set.txt')), 'dev')

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith('ex') else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer, ):
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc='convert examples to features'):
        if ex_index % 10_000 == 0:
            logger.info('Writing example %d' % (ex_index))
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='max_length' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info('*** Example ***')
        logger.info(f"""guid: {example}""")
        logger.info(f"""features: {features[i]}""")
    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
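
# Usage sketch (added; paths are illustrative and the HANS .txt files come from
# https://github.com/tommccoy1/hans):
# tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')
# dataset = HansDataset('/path/to/hans', tokenizer, task='hans', max_seq_length=128)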
| 695 | 0 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowercase__ = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
lowercase__ = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
lowercase__ = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 709 |
'''simple docstring'''
def solution(n=10_00):
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(f'''{solution() = }''')
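# Worked check (added): the expansions produced above are 3/2, 7/5, 17/12, 41/29,
# 99/70, 239/169, 577/408, 1393/985, ... The eighth, 1393/985, is the first whose
# numerator has more digits than its denominator, the example quoted in Project
# Euler problem 57.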
| 695 | 0 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
    DECODER_CONVERSION_MAPPING = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '.'.join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R'layers_(\d+)' , R'layer.\1' , new_key )
                new_key = new_key.replace('encoder' , 'encoder.encoder' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(R'layers_(\d+)' , R'layer.\1' , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = Pix2StructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = Pix2StructForConditionalGeneration(config )
    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print('Model saved in {}'.format(pytorch_dump_folder_path ) )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Convert a VQA (visual question answering) checkpoint.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
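
# Usage sketch (added, illustrative paths):
# python convert_pix2struct_original_pytorch_to_hf.py \
#     --t5x_checkpoint_path /path/to/pix2struct_base_checkpoint \
#     --pytorch_dump_folder_path ./pix2struct-base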
| 710 |
'''simple docstring'''
def solution(power=10_00):
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
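# Worked check (added): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so
# solution(15) == 26, the example stated in Project Euler problem 16.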
| 695 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) -> Tuple:
for attribute in key.split('.' ):
UpperCAmelCase : List[str] = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
if weight_type is not None:
UpperCAmelCase : Dict = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
else:
UpperCAmelCase : List[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCAmelCase : Dict = value
elif weight_type == "weight_g":
UpperCAmelCase : List[Any] = value
elif weight_type == "weight_v":
UpperCAmelCase : Any = value
elif weight_type == "bias":
UpperCAmelCase : int = value
else:
UpperCAmelCase : int = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
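# Example invocation (a sketch; the script name, checkpoint filename and output
# folder are hypothetical, and the unilm WavLM sources must be on PYTHONPATH):
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted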
| 711 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "blip_2_vision_model"

    def __init__(self, hidden_size=1_408, intermediate_size=6_144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00001, attention_dropout=0.0, initializer_range=1E-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "blip_2_qformer"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1_408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    '''simple docstring'''

    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: Blip2VisionConfig, qformer_config: Blip2QFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
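# Minimal composition sketch (kept in comments so importing this file stays
# side-effect free; OPTConfig would come from transformers and is not imported
# here):
#   config = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#   )
#   config.to_dict()["model_type"]  # -> "blip-2"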
| 695 | 0 |
'''simple docstring'''
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
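# The recurrence implemented above is s(1) = 2 and
# s(n) = s(n-1) * (s(n-1) - 1) + 1, i.e. s(n) = s(n-1)^2 - s(n-1) + 1,
# giving 2, 3, 7, 43, 1807, 3263443, ... (each term is one more than the
# product of all previous terms).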
if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 712 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
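# To exercise this slow test locally, opt in via the usual transformers env
# flag (a sketch; the test file path is hypothetical):
#   RUN_SLOW=1 pytest -k test_bleu_scores tests/fsmt/test_fsmt_bleu_score.py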
| 695 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict, hidden_size):
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint):
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
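# Example invocation (a sketch; the script name and output folder are
# hypothetical, and the audiocraft package must be installed so that
# MusicGen.get_pretrained can download the fairseq checkpoint):
#   python convert_musicgen_transformers.py \
#       --checkpoint small --pytorch_dump_folder ./musicgen-small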
| 713 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class Pix2StructTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, vocab_size=50_244, hidden_size=768, d_kv=64, d_ff=2_048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1E-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2_048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1E-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1E-10, initializer_factor=1.0, seq_len=4_096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
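# Composition sketch (kept as comments to avoid import-time side effects):
#   config = Pix2StructConfig.from_text_vision_configs(
#       Pix2StructTextConfig(), Pix2StructVisionConfig()
#   )
#   config.to_dict()["model_type"]  # -> "pix2struct"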
| 695 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = StableDiffusionInstructPixaPixPipeline
UpperCAmelCase_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
UpperCAmelCase_ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase_ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase_ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self : int ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCAmelCase : Union[str, Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
UpperCAmelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCAmelCase : Optional[int] = CLIPTextModel(UpperCamelCase_ )
UpperCAmelCase : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : List[Any] , lowercase_ : List[str]=0 ) -> Dict:
UpperCAmelCase : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : Optional[int] = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('RGB' )
if str(UpperCamelCase_ ).startswith('mps' ):
UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ )
else:
UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCAmelCase : Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
UpperCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : List[Any] = self.get_dummy_components()
UpperCAmelCase : Optional[int] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCAmelCase : str = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase : str = self.get_dummy_inputs(UpperCamelCase_ )
UpperCAmelCase : str = sd_pipe(**UpperCamelCase_ ).images
UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Tuple = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : List[Any] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCAmelCase : List[str] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ )
UpperCAmelCase : Tuple = 'french fries'
UpperCAmelCase : List[str] = sd_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ )
UpperCAmelCase : int = output.images
UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : str = self.get_dummy_components()
UpperCAmelCase : Any = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCAmelCase : List[Any] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCAmelCase : List[str] = [inputs['prompt']] * 2
UpperCAmelCase : Optional[Any] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
UpperCAmelCase : Optional[int] = torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 ).to(UpperCamelCase_ )
UpperCAmelCase : Any = image / 2 + 0.5
UpperCAmelCase : Optional[Any] = image.permute(0 , 3 , 1 , 2 )
UpperCAmelCase : Any = image.repeat(2 , 1 , 1 , 1 )
UpperCAmelCase : Dict = sd_pipe(**UpperCamelCase_ ).images
UpperCAmelCase : int = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
UpperCAmelCase : int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase : List[str] = self.get_dummy_components()
UpperCAmelCase : int = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' )
UpperCAmelCase : str = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCAmelCase : Union[str, Any] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase : List[str] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCAmelCase : Dict = sd_pipe(**UpperCamelCase_ ).images
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase : Dict = [round(UpperCamelCase_ , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(UpperCamelCase_ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Optional[int] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase : List[str] = self.get_dummy_components()
UpperCAmelCase : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCAmelCase : Any = VaeImageProcessor(do_resize=UpperCamelCase_ , do_normalize=UpperCamelCase_ )
UpperCAmelCase : Dict = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type='pt' ) )[0]
UpperCAmelCase : Any = components['vae']
UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
UpperCAmelCase : List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
UpperCAmelCase : Any = pipe(**UpperCamelCase_ )[0]
UpperCAmelCase : int = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase_ , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : str=0 ) -> int:
UpperCAmelCase : List[Any] = torch.manual_seed(UpperCamelCase_ )
UpperCAmelCase : Dict = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
UpperCAmelCase : Optional[int] = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
UpperCAmelCase : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Tuple = self.get_inputs()
UpperCAmelCase : Dict = pipe(**UpperCamelCase_ ).images
UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
UpperCAmelCase : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=UpperCamelCase_ )
UpperCAmelCase : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Optional[int] = self.get_inputs()
UpperCAmelCase : Union[str, Any] = pipe(**UpperCamelCase_ ).images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : str = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=UpperCamelCase_ )
UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Optional[Any] = self.get_inputs()
UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ ).images
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Union[str, Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase_ ( self : List[str] ) -> str:
UpperCAmelCase : Tuple = 0
def callback_fn(lowercase_ : int , lowercase_ : int , lowercase_ : torch.FloatTensor ) -> None:
UpperCAmelCase : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase : Any = latents[0, -3:, -3:, -1]
UpperCAmelCase : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
UpperCAmelCase : List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCAmelCase : Optional[int] = latents[0, -3:, -3:, -1]
UpperCAmelCase : int = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
UpperCAmelCase : int = False
UpperCAmelCase : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=UpperCamelCase_ , torch_dtype=torch.floataa )
UpperCAmelCase : List[str] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : Tuple = self.get_inputs()
pipe(**UpperCamelCase_ , callback=UpperCamelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=UpperCamelCase_ , torch_dtype=torch.floataa )
UpperCAmelCase : Any = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase : Tuple = self.get_inputs()
UpperCAmelCase : str = pipe(**UpperCamelCase_ )
UpperCAmelCase : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def UpperCAmelCase_ ( self : str ) -> Tuple:
UpperCAmelCase : Optional[int] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCAmelCase : Tuple = inputs['image'].resize((504, 504) )
UpperCAmelCase : Optional[int] = 'timbrooks/instruct-pix2pix'
UpperCAmelCase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCAmelCase : List[str] = pipe(**UpperCamelCase_ )
UpperCAmelCase : Any = output.images[0]
UpperCAmelCase : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
UpperCAmelCase : List[Any] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
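# Usage sketch of the pipeline exercised by these tests (assumes the released
# diffusers StableDiffusionInstructPix2PixPipeline API; `image` is a PIL.Image
# supplied by the caller):
#   pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix")
#   edited = pipe("turn him into a cyborg", image=image, image_guidance_scale=1.0).images[0]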
| 714 |
'''simple docstring'''
import base64


def base64_encode(string: str) -> bytes:
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
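# Round trip for the demo string above (values follow from base64.b64encode):
#   base64_encode("Hello World!")        -> b'SGVsbG8gV29ybGQh'
#   base64_decode(b'SGVsbG8gV29ybGQh')   -> 'Hello World!'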
| 695 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowercase__ = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
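# Instantiating the shim still works but warns (a sketch):
#   extractor = PoolFormerFeatureExtractor()  # emits a FutureWarning, then
#   behaves exactly like PoolFormerImageProcessor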
| 715 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
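# Quick sketch of similarity_search on toy data (values are easy to verify by
# hand; the nearest neighbour of [0.1, 0.1] is [0.0, 0.0] at distance ~0.1414):
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.1, 0.1]])
#   similarity_search(dataset, value_array)  # -> [[[0.0, 0.0], 0.1414...]]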
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
lowercase__ = pd.read_csv("sample_data.csv", header=None)
lowercase__ = df.shape[:1][0]
# If you're using some other dataset input the target column
lowercase__ = df.iloc[:, 1:2]
lowercase__ = actual_data.values.reshape(len_data, 1)
lowercase__ = MinMaxScaler().fit_transform(actual_data)
lowercase__ = 10
lowercase__ = 5
lowercase__ = 20
lowercase__ = len_data - periods * look_back
lowercase__ = actual_data[:division]
lowercase__ = actual_data[division - look_back :]
lowercase__ , lowercase__ = [], []
lowercase__ , lowercase__ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
lowercase__ = np.array(train_x)
lowercase__ = np.array(test_x)
lowercase__ = np.array([list(i.ravel()) for i in train_y])
lowercase__ = np.array([list(i.ravel()) for i in test_y])
lowercase__ = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
lowercase__ = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
lowercase__ = model.predict(x_test)
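    # Window layout sketch: with look_back=10 and forward_days=5, sample i
    # covers input indices [i, i+10) and target indices [i+10, i+15), so
    # x_train has shape (n_samples, 10, 1) and y_train (n_samples, 5).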
| 716 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("fixtures")
lowercase__ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase__ = get_tests_dir("fixtures/dummy-config.json")
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Optional[Any] = 0
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase : Any = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ ).to_dict()
config_dict.pop('feature_extractor_type' )
UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor(**lowercase_ )
# save in new folder
model_config.save_pretrained(lowercase_ )
config.save_pretrained(lowercase_ )
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase_ )
# make sure private variable is not incorrectly saved
UpperCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained('bert-base' )
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : str = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Dict = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase_ ( self : int ) -> Tuple:
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = True
try:
AutoConfig.register('custom' , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_ )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(lowercase_ , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
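# Registration sketch mirroring the tests above (fixture classes are the ones
# imported at the top of this file):
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   # registering a type transformers already maps raises ValueError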
| 695 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase__ = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class A_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase_ ( cls : int ) -> int:
UpperCAmelCase : Optional[Any] = TOKEN
HfFolder.save_token(lowerCAmelCase_ )
@classmethod
def UpperCAmelCase_ ( cls : List[str] ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='test-model-flax' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
except HTTPError:
pass
def UpperCAmelCase_ ( self : str ) -> Any:
UpperCAmelCase : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase : Any = FlaxBertModel(lowerCAmelCase_ )
model.push_to_hub('test-model-flax' , use_auth_token=self._token )
UpperCAmelCase : str = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
UpperCAmelCase : Tuple = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='test-model-flax' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase_ , repo_id='test-model-flax' , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
UpperCAmelCase : Optional[int] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" )
UpperCAmelCase : Optional[int] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=f"""{key} not identical""" )
def UpperCAmelCase_ ( self : Any ) -> List[str]:
UpperCAmelCase : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase : Optional[Any] = FlaxBertModel(lowerCAmelCase_ )
model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
UpperCAmelCase : Optional[Any] = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
UpperCAmelCase : str = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=f"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCAmelCase_ , repo_id='valid_org/test-model-flax-org' , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
UpperCAmelCase : Any = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
UpperCAmelCase : List[Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 , msg=f"""{key} not identical""" )
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
UpperCAmelCase : int = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
UpperCAmelCase : int = FlaxBertModel(lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
with self.assertRaises(lowerCAmelCase_ ):
UpperCAmelCase : Any = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
UpperCAmelCase : str = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertTrue(check_models_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
def UpperCAmelCase_ ( self : Dict ) -> Any:
UpperCAmelCase : str = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
UpperCAmelCase : Dict = FlaxBertModel(lowerCAmelCase_ )
UpperCAmelCase : List[str] = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , max_shard_size='10KB' )
with self.assertRaises(lowerCAmelCase_ ):
UpperCAmelCase : Any = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
UpperCAmelCase : int = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertTrue(check_models_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
def UpperCAmelCase_ ( self : List[str] ) -> int:
UpperCAmelCase : int = 'bert'
UpperCAmelCase : Tuple = 'hf-internal-testing/tiny-random-bert-subfolder'
with self.assertRaises(lowerCAmelCase_ ):
UpperCAmelCase : str = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
UpperCAmelCase : List[Any] = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def UpperCAmelCase_ ( self : str ) -> Dict:
UpperCAmelCase : str = 'bert'
UpperCAmelCase : Tuple = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
with self.assertRaises(lowerCAmelCase_ ):
UpperCAmelCase : Optional[Any] = FlaxBertModel.from_pretrained(lowerCAmelCase_ )
UpperCAmelCase : Tuple = FlaxBertModel.from_pretrained(lowerCAmelCase_ , subfolder=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
| 717 |
'''simple docstring'''
from datetime import datetime
import requests
def UpperCamelCase( UpperCAmelCase_ ):
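    # Resolve the direct .mp4 source via downloadgram's JSON API, then fetch the raw bytes.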
UpperCAmelCase : Tuple = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
UpperCAmelCase : List[str] = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(UpperCAmelCase_ ).content
if __name__ == "__main__":
lowercase__ = input("Enter Video/IGTV url: ").strip()
lowercase__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 695 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class A_ :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any]=13 , lowercase_ : Dict=7 , lowercase_ : Any=True , lowercase_ : Tuple=True , lowercase_ : str=True , lowercase_ : Union[str, Any]=True , lowercase_ : str=99 , lowercase_ : Optional[Any]=32 , lowercase_ : Optional[Any]=2 , lowercase_ : Tuple=4 , lowercase_ : Optional[Any]=37 , lowercase_ : Tuple="gelu" , lowercase_ : Any=0.1 , lowercase_ : str=0.1 , lowercase_ : Dict=512 , lowercase_ : int=16 , lowercase_ : Tuple=2 , lowercase_ : int=0.02 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=4 , lowercase_ : Dict=None , lowercase_ : Dict=1_000 , ) -> Dict:
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : Optional[int] = batch_size
UpperCAmelCase : Any = seq_length
UpperCAmelCase : Dict = is_training
UpperCAmelCase : Optional[int] = use_input_mask
UpperCAmelCase : Tuple = use_token_type_ids
UpperCAmelCase : Any = use_labels
UpperCAmelCase : Optional[int] = vocab_size
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : str = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : Any = type_sequence_label_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = num_labels
UpperCAmelCase : Union[str, Any] = num_choices
UpperCAmelCase : str = scope
UpperCAmelCase : str = range_bbox
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase : List[Any] = bbox[i, j, 3]
UpperCAmelCase : Tuple = bbox[i, j, 1]
UpperCAmelCase : Optional[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase : int = bbox[i, j, 2]
UpperCAmelCase : List[Any] = bbox[i, j, 0]
UpperCAmelCase : List[Any] = t
UpperCAmelCase : List[Any] = tf.convert_to_tensor(UpperCAmelCase__ )
UpperCAmelCase : List[Any] = None
if self.use_input_mask:
UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Dict = None
if self.use_token_type_ids:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Tuple = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : str = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : int , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : int ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = TFLayoutLMModel(config=UpperCAmelCase__ )
UpperCAmelCase : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
UpperCAmelCase : Tuple = model(UpperCAmelCase__ , UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
UpperCAmelCase : Dict = model(UpperCAmelCase__ , UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] ) -> List[str]:
UpperCAmelCase : Union[str, Any] = TFLayoutLMForMaskedLM(config=UpperCAmelCase__ )
UpperCAmelCase : Optional[int] = model(UpperCAmelCase__ , UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : Dict , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : Any , lowercase_ : Any , lowercase_ : Optional[Any] ) -> List[str]:
UpperCAmelCase : Tuple = self.num_labels
UpperCAmelCase : List[Any] = TFLayoutLMForSequenceClassification(config=UpperCAmelCase__ )
UpperCAmelCase : Any = model(UpperCAmelCase__ , UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : List[str] ) -> Union[str, Any]:
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : int = TFLayoutLMForTokenClassification(config=UpperCAmelCase__ )
UpperCAmelCase : Optional[int] = model(UpperCAmelCase__ , UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Any , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = TFLayoutLMForQuestionAnswering(config=UpperCAmelCase__ )
UpperCAmelCase : Dict = model(UpperCAmelCase__ , UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCAmelCase : Union[str, Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ : Dict = (
{
'feature-extraction': TFLayoutLMModel,
'fill-mask': TFLayoutLMForMaskedLM,
'text-classification': TFLayoutLMForSequenceClassification,
'token-classification': TFLayoutLMForTokenClassification,
'zero-shot': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ : int = False
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[str] = 10
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : Any = TFLayoutLMModelTester(self )
UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : str ) -> str:
UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Any ) -> int:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Any:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Any = TFLayoutLMModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
pass
def UpperCamelCase( ):
UpperCAmelCase : Dict = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
UpperCAmelCase : Optional[int] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
UpperCAmelCase : int = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
UpperCAmelCase : str = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
UpperCAmelCase : List[Any] = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
UpperCAmelCase : int = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase : Dict = model(input_ids=UpperCAmelCase__ , bbox=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
# test the sequence output on [0, :3, :3]
UpperCAmelCase : Tuple = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase__ , atol=1E-3 ) )
# test the pooled output on [1, :3]
UpperCAmelCase : Dict = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , UpperCAmelCase__ , atol=1E-3 ) )
@slow
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
UpperCAmelCase : int = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase : Tuple = model(
input_ids=UpperCAmelCase__ , bbox=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
UpperCAmelCase : Optional[Any] = outputs.loss
UpperCAmelCase : Optional[int] = (2,)
self.assertEqual(loss.shape , UpperCAmelCase__ )
# test the shape of the logits
UpperCAmelCase : Union[str, Any] = outputs.logits
UpperCAmelCase : Optional[int] = (2, 2)
self.assertEqual(logits.shape , UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
UpperCAmelCase : List[str] = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=13 )
UpperCAmelCase : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase : Tuple = model(
input_ids=UpperCAmelCase__ , bbox=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
# test the shape of the logits
UpperCAmelCase : str = outputs.logits
UpperCAmelCase : List[str] = tf.convert_to_tensor((2, 25, 13) )
self.assertEqual(logits.shape , UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : str ) -> str:
UpperCAmelCase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
UpperCAmelCase : str = prepare_layoutlm_batch_inputs()
# forward pass
UpperCAmelCase : Union[str, Any] = model(input_ids=UpperCAmelCase__ , bbox=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
# test the shape of the logits
UpperCAmelCase : Optional[int] = tf.convert_to_tensor((2, 25) )
self.assertEqual(outputs.start_logits.shape , UpperCAmelCase__ )
self.assertEqual(outputs.end_logits.shape , UpperCAmelCase__ )
| 718 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 10**9 ):
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Optional[int] = 2
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : List[Any] = 0
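    # This appears to be Project Euler 94: sum the perimeters of almost-equilateral
    # triangles (sides a, a, a±1) with integral area. Successive perimeters follow a
    # Pell-style recurrence; `i` alternates between the +2 and -2 branch below.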
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
UpperCAmelCase : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase_ ( self : str ) -> int:
UpperCAmelCase : List[Any] = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
UpperCAmelCase : str = {
"input_ids": tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]] , dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
UpperCAmelCase : Union[str, Any] = model(snake_case__ )["last_hidden_state"]
UpperCAmelCase : Optional[int] = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , snake_case__ )
# compare the actual values for a slice.
UpperCAmelCase : Tuple = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 719 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase : str = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
UpperCAmelCase : Any = self.dummy_uncond_unet
UpperCAmelCase : Tuple = DDIMScheduler()
UpperCAmelCase : Optional[Any] = self.dummy_vq_model
UpperCAmelCase : str = LDMPipeline(unet=lowercase_ , vqvae=lowercase_ , scheduler=lowercase_ )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : int = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' ).images
UpperCAmelCase : int = torch.manual_seed(0 )
UpperCAmelCase : Tuple = ldm(generator=lowercase_ , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : List[str] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCAmelCase : Tuple = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : Any = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase_ )
ldm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : Dict = ldm(generator=lowercase_ , num_inference_steps=5 , output_type='numpy' ).images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCAmelCase : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 695 | 0 |
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def UpperCamelCase( *UpperCAmelCase_ ):
    if not isinstance(__a , list ):
UpperCAmelCase : List[Any] = list(__a )
for i in range(len(__a ) ):
UpperCAmelCase : List[str] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
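# Usage sketch for the helper above (upstream name: accelerate's release_memory).
# References are nulled in place and the active device cache is flushed:
# model, optimizer = UpperCamelCase(model, optimizer) # both come back as None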
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : Tuple = [
'CUDA out of memory.', # CUDA OOM
'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.', # CUDNN SNAFU
'DefaultCPUAllocator: can\'t allocate memory', # CPU OOM
]
    if isinstance(__a , RuntimeError ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def UpperCamelCase( UpperCAmelCase_ = None , UpperCAmelCase_ = 1_28 ):
if function is None:
return functools.partial(__a , starting_batch_size=__a )
UpperCAmelCase : Dict = starting_batch_size
def decorator(*UpperCAmelCase_ , **UpperCAmelCase_ ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
UpperCAmelCase : Union[str, Any] = list(inspect.signature(__a ).parameters.keys() )
# Guard against user error
        if len(params ) < (len(args ) + 1):
UpperCAmelCase : Optional[int] = ', '.join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError('No executable batch size found, reached zero.' )
try:
return function(__a , *__a , **__a )
except Exception as e:
if should_reduce_batch_size(__a ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
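# A minimal usage sketch for the decorator factory above (upstream name: accelerate's
# find_executable_batch_size; the training function here is hypothetical). The wrapped
# function must take the batch size as its first argument; the decorator injects it
# and halves it after every OOM-style failure until the call succeeds:
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size, model, dataloader):
#     ... # forward/backward passes that may raise a CUDA OOM RuntimeError
#
# train(model, dataloader) # batch_size is supplied by the decorator, not the caller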
| 720 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 695 | 0 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
lowercase__ = True
except (ImportError, ModuleNotFoundError):
lowercase__ = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def UpperCamelCase( UpperCAmelCase_ ):
    UpperCAmelCase_ = re.sub('<n>' , '' , UpperCAmelCase_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCAmelCase_ ) )
| 721 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = """autoformer"""
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Dict , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "student_t" , lowercase_ : str = "nll" , lowercase_ : int = 1 , lowercase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowercase_ : bool = True , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : int = 0 , lowercase_ : Optional[List[int]] = None , lowercase_ : Optional[List[int]] = None , lowercase_ : int = 64 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 2 , lowercase_ : int = 32 , lowercase_ : int = 32 , lowercase_ : str = "gelu" , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , lowercase_ : int = 100 , lowercase_ : float = 0.02 , lowercase_ : bool = True , lowercase_ : Union[str, Any]=True , lowercase_ : int = 10 , lowercase_ : int = 25 , lowercase_ : int = 3 , **lowercase_ : str , ) -> Dict:
# time series specific configuration
UpperCAmelCase : int = prediction_length
UpperCAmelCase : Optional[Any] = context_length if context_length is not None else prediction_length
UpperCAmelCase : List[Any] = distribution_output
UpperCAmelCase : Tuple = loss
UpperCAmelCase : Dict = input_size
UpperCAmelCase : Dict = num_time_features
UpperCAmelCase : Tuple = lags_sequence
UpperCAmelCase : str = scaling
UpperCAmelCase : Optional[int] = num_dynamic_real_features
UpperCAmelCase : List[str] = num_static_real_features
UpperCAmelCase : Optional[int] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : int = cardinality
else:
UpperCAmelCase : Union[str, Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase : Any = embedding_dimension
else:
UpperCAmelCase : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase : Dict = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase : Optional[int] = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : Dict = encoder_attention_heads
UpperCAmelCase : Tuple = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase : str = decoder_ffn_dim
UpperCAmelCase : str = encoder_layers
UpperCAmelCase : Optional[Any] = decoder_layers
UpperCAmelCase : int = dropout
UpperCAmelCase : Any = attention_dropout
UpperCAmelCase : Tuple = activation_dropout
UpperCAmelCase : str = encoder_layerdrop
UpperCAmelCase : Union[str, Any] = decoder_layerdrop
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : Dict = init_std
UpperCAmelCase : Union[str, Any] = use_cache
# Autoformer
UpperCAmelCase : Any = label_length
UpperCAmelCase : List[Any] = moving_average
UpperCAmelCase : Optional[Any] = autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def UpperCAmelCase_ ( self : List[str] ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 695 | 0 |
'''simple docstring'''
class A_ : # Public class to implement a graph
'''simple docstring'''
def __init__( self : str , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]] ) -> List[str]:
UpperCAmelCase : Dict = row
UpperCAmelCase : Union[str, Any] = col
UpperCAmelCase : int = graph
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]] ) -> List[Any]:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCAmelCase_ ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : list[list[bool]] ) -> int:
UpperCAmelCase : Optional[Any] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
UpperCAmelCase : Optional[Any] = [-1, 0, 1, -1, 1, -1, 0, 1]
UpperCAmelCase : Optional[int] = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : int ) -> Any: # And finally, count all islands.
UpperCAmelCase : Tuple = [[False for j in range(self.COL )] for i in range(self.ROW )]
UpperCAmelCase : Optional[Any] = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
count += 1
return count
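# A small usage sketch (hypothetical grid; the method names are obfuscated in this dump,
# so the last method above plays the role of count_islands). 1 = land, 0 = water; the
# 8-neighbour DFS merges diagonally touching cells into one island:
# graph = [[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]] # expected island count: 2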
| 700 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
return [sentence[i : i + ngram_size] for i in range(len(UpperCAmelCase_ ) - ngram_size + 1 )]
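# Example: with ngram_size=3 the sentence "abcde" yields the character trigrams
# ['abc', 'bcd', 'cde'].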
if __name__ == "__main__":
from doctest import testmod
testmod()
| 695 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Dict ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
UpperCAmelCase : Optional[int] = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
UpperCAmelCase : Dict = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
sd_pipe.set_scheduler('sample_euler' )
UpperCAmelCase : List[str] = 'A painting of a squirrel eating a burger'
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : Tuple = sd_pipe([prompt] , generator=_lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
UpperCAmelCase : Optional[Any] = output.images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : List[Any] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
UpperCAmelCase : str = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCAmelCase : List[str] = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
sd_pipe.set_scheduler('sample_euler' )
UpperCAmelCase : Tuple = 'A painting of a squirrel eating a burger'
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : List[str] = sd_pipe([prompt] , generator=_lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
UpperCAmelCase : int = output.images
UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Union[str, Any] = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def UpperCAmelCase_ ( self : Tuple ) -> Any:
UpperCAmelCase : List[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCAmelCase : Optional[int] = sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
UpperCAmelCase : Dict = 'A painting of a squirrel eating a burger'
UpperCAmelCase : Tuple = torch.manual_seed(0 )
UpperCAmelCase : List[str] = sd_pipe(
[prompt] , generator=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=_lowerCamelCase , )
UpperCAmelCase : Union[str, Any] = output.images
UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase : Dict = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 701 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : int = sin(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = cos(UpperCAmelCase_ )
UpperCAmelCase : Dict = _sin / (2 * q_factor)
UpperCAmelCase : Tuple = (1 - _cos) / 2
UpperCAmelCase : Dict = 1 - _cos
UpperCAmelCase : Union[str, Any] = 1 + alpha
UpperCAmelCase : List[str] = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha
UpperCAmelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[str] = cos(UpperCAmelCase_ )
UpperCAmelCase : Tuple = _sin / (2 * q_factor)
UpperCAmelCase : int = (1 + _cos) / 2
UpperCAmelCase : List[Any] = -1 - _cos
UpperCAmelCase : Optional[int] = 1 + alpha
UpperCAmelCase : Any = -2 * _cos
UpperCAmelCase : str = 1 - alpha
UpperCAmelCase : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : List[Any] = tau * frequency / samplerate
UpperCAmelCase : Union[str, Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : Any = _sin / (2 * q_factor)
UpperCAmelCase : Optional[Any] = _sin / 2
UpperCAmelCase : Union[str, Any] = 0
UpperCAmelCase : int = -ba
UpperCAmelCase : List[str] = 1 + alpha
UpperCAmelCase : int = -2 * _cos
UpperCAmelCase : Optional[int] = 1 - alpha
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) ):
UpperCAmelCase : int = tau * frequency / samplerate
UpperCAmelCase : Tuple = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : int = 1 - alpha
UpperCAmelCase : Dict = -2 * _cos
UpperCAmelCase : Any = 1 + alpha
UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Optional[int] = tau * frequency / samplerate
UpperCAmelCase : List[Any] = sin(UpperCAmelCase_ )
UpperCAmelCase : Dict = cos(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
UpperCAmelCase : Union[str, Any] = 10 ** (gain_db / 40)
UpperCAmelCase : int = 1 + alpha * big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : List[Any] = 1 - alpha * big_a
UpperCAmelCase : Tuple = 1 + alpha / big_a
UpperCAmelCase : Tuple = -2 * _cos
UpperCAmelCase : int = 1 - alpha / big_a
UpperCAmelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Dict = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : Any = cos(UpperCAmelCase_ )
UpperCAmelCase : str = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : List[str] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : List[Any] = big_a * (pmc + aaa)
UpperCAmelCase : Optional[int] = 2 * big_a * mpc
UpperCAmelCase : Optional[int] = big_a * (pmc - aaa)
UpperCAmelCase : str = ppmc + aaa
UpperCAmelCase : int = -2 * pmpc
UpperCAmelCase : int = ppmc - aaa
UpperCAmelCase : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 1 / sqrt(2 ) , ):
UpperCAmelCase : Tuple = tau * frequency / samplerate
UpperCAmelCase : List[str] = sin(UpperCAmelCase_ )
UpperCAmelCase : List[Any] = cos(UpperCAmelCase_ )
UpperCAmelCase : int = _sin / (2 * q_factor)
UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
UpperCAmelCase : str = (big_a + 1) - (big_a - 1) * _cos
UpperCAmelCase : List[Any] = (big_a + 1) + (big_a - 1) * _cos
UpperCAmelCase : int = (big_a - 1) - (big_a + 1) * _cos
UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
UpperCAmelCase : Optional[Any] = 2 * sqrt(UpperCAmelCase_ ) * alpha
UpperCAmelCase : Dict = big_a * (ppmc + aaa)
UpperCAmelCase : List[str] = -2 * big_a * pmpc
UpperCAmelCase : int = big_a * (ppmc - aaa)
UpperCAmelCase : Dict = pmc + aaa
UpperCAmelCase : Optional[int] = 2 * mpc
UpperCAmelCase : int = pmc - aaa
UpperCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
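# A minimal usage sketch. All seven biquad factories above share the name UpperCamelCase
# in this dump, so only the last binding survives; with their original distinct names
# (make_lowpass, make_highpass, ..., make_highshelf) one would build and apply a filter:
# lp = make_lowpass(1_000, 48_000) # 1 kHz low-pass biquad at a 48 kHz sample rate
# print(lp.process(0.5)) # IIRFilter.process is assumed from audio_filters.iir_filter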
| 695 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
lowercase__ = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ = 1 , UpperCAmelCase_ = "new" , UpperCAmelCase_ = None ):
UpperCAmelCase : Union[str, Any] = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(lowerCamelCase_ ) - valid_terms ) ):
UpperCAmelCase : Tuple = F"""Invalid search term: {invalid_search_terms}"""
raise ValueError(lowerCamelCase_ )
UpperCAmelCase : Optional[Any] = requests.get(
F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'User-agent': 'A random string'} , )
if response.status_code == 4_29:
raise requests.HTTPError
UpperCAmelCase : Any = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(lowerCamelCase_ )}
UpperCAmelCase : int = {}
for id_ in range(lowerCamelCase_ ):
UpperCAmelCase : Any = {
item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 702 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
class A_ ( Generic[T] ):
'''simple docstring'''
UpperCAmelCase_ : deque[T] # Cache store of keys
UpperCAmelCase_ : set[T] # References of the keys in cache
UpperCAmelCase_ : int = 10 # Maximum capacity of cache
def __init__( self : List[Any] , lowercase_ : int ) -> None:
UpperCAmelCase : Any = deque()
UpperCAmelCase : Dict = set()
if not n:
UpperCAmelCase : Optional[int] = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
UpperCAmelCase : str = n
def UpperCAmelCase_ ( self : List[str] , lowercase_ : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCAmelCase : Optional[Any] = self.dq_store.pop()
self.key_reference.remove(lowercase_ )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> None:
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Union[str, Any] ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 | 0 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Dict = len(UpperCamelCase__ ) + 1
UpperCAmelCase : int = len(UpperCamelCase__ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
UpperCAmelCase : Union[str, Any] = [[0 for i in range(UpperCamelCase__ )] for j in range(UpperCamelCase__ )]
# since string of zero length match pattern of zero length
UpperCAmelCase : List[str] = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , UpperCamelCase__ ):
UpperCAmelCase : Dict = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , UpperCamelCase__ ):
UpperCAmelCase : int = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , UpperCamelCase__ ):
for j in range(1 , UpperCamelCase__ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCAmelCase : List[Any] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCAmelCase : str = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCAmelCase : Tuple = dp[i - 1][j]
else:
UpperCAmelCase : int = 0
else:
UpperCAmelCase : Tuple = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
lowercase__ = "aab"
lowercase__ = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 703 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<|endoftext|>" , lowercase_ : List[str]="<|endoftext|>" , lowercase_ : Any="<|endoftext|>" , lowercase_ : List[str]=False , **lowercase_ : Union[str, Any] , ) -> str:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(lowercase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Tuple = pre_tok_class(**lowercase_ )
UpperCAmelCase : Any = add_prefix_space
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
| 695 | 0 |
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ = 60_08_51_47_51_43 ):
try:
UpperCAmelCase : int = int(__SCREAMING_SNAKE_CASE )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
UpperCAmelCase : str = 2
UpperCAmelCase : Optional[int] = 0
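    # Trial division: advance i to the next divisor of n, record it, then strip every
    # copy of that factor from n; the last recorded divisor is the largest prime factor.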
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
UpperCAmelCase : Union[str, Any] = i
while n % i == 0:
UpperCAmelCase : Optional[int] = n // i
i += 1
return int(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 704 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = """openai/whisper-base"""
UpperCAmelCase_ : Union[str, Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
UpperCAmelCase_ : Dict = """transcriber"""
UpperCAmelCase_ : int = WhisperProcessor
UpperCAmelCase_ : Optional[int] = WhisperForConditionalGeneration
UpperCAmelCase_ : Dict = ["""audio"""]
UpperCAmelCase_ : Optional[int] = ["""text"""]
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> Optional[int]:
return self.pre_processor(lowercase_ , return_tensors='pt' ).input_features
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int ) -> List[str]:
return self.model.generate(inputs=lowercase_ )
def UpperCAmelCase_ ( self : str , lowercase_ : List[Any] ) -> List[str]:
return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )[0]
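# A hedged usage sketch for the PipelineTool above (invocation details assumed from the
# transformers tools API, where a tool instance is called directly on its input):
# tool = A_()
# print(tool("path/to/audio.flac")) # -> transcribed text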
| 695 | 0 |